author     Michaël Zasso <targos@protonmail.com>  2017-05-02 10:50:00 +0200
committer  Michaël Zasso <targos@protonmail.com>  2017-05-06 20:02:35 +0200
commit     60d1aac8d225e844e68ae48e8f3d58802e635fbe (patch)
tree       922f347dd054db18d88666fad7181e5a777f4022 /deps/v8/src
parent     73d9c0f903ae371cd5011af64c3a6f69a1bda978 (diff)
download   node-new-60d1aac8d225e844e68ae48e8f3d58802e635fbe.tar.gz

deps: update V8 to 5.8.283.38

PR-URL: https://github.com/nodejs/node/pull/12784
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Gibson Fahnestock <gibfahn@gmail.com>
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/accessors.h3
-rw-r--r--deps/v8/src/api-experimental.h4
-rw-r--r--deps/v8/src/api-natives.cc2
-rw-r--r--deps/v8/src/api.cc543
-rw-r--r--deps/v8/src/api.h87
-rw-r--r--deps/v8/src/arguments.cc1
-rw-r--r--deps/v8/src/arguments.h2
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h13
-rw-r--r--deps/v8/src/arm/assembler-arm.cc436
-rw-r--r--deps/v8/src/arm/assembler-arm.h89
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc804
-rw-r--r--deps/v8/src/arm/codegen-arm.cc21
-rw-r--r--deps/v8/src/arm/constants-arm.h18
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc2
-rw-r--r--deps/v8/src/arm/disasm-arm.cc472
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc57
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc107
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h30
-rw-r--r--deps/v8/src/arm/simulator-arm.cc1520
-rw-r--r--deps/v8/src/arm/simulator-arm.h98
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h2
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h1
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc911
-rw-r--r--deps/v8/src/arm64/codegen-arm64.cc22
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc2
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc60
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc116
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h19
-rw-r--r--deps/v8/src/asmjs/asm-js.cc37
-rw-r--r--deps/v8/src/asmjs/asm-typer.cc60
-rw-r--r--deps/v8/src/asmjs/asm-typer.h4
-rw-r--r--deps/v8/src/asmjs/asm-wasm-builder.cc166
-rw-r--r--deps/v8/src/assembler.cc76
-rw-r--r--deps/v8/src/assembler.h95
-rw-r--r--deps/v8/src/assert-scope.cc1
-rw-r--r--deps/v8/src/ast/OWNERS1
-rw-r--r--deps/v8/src/ast/ast-literal-reindexer.cc322
-rw-r--r--deps/v8/src/ast/ast-literal-reindexer.h43
-rw-r--r--deps/v8/src/ast/ast-numbering.cc91
-rw-r--r--deps/v8/src/ast/ast-types.cc13
-rw-r--r--deps/v8/src/ast/ast-types.h33
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc80
-rw-r--r--deps/v8/src/ast/ast-value-factory.h66
-rw-r--r--deps/v8/src/ast/ast.cc186
-rw-r--r--deps/v8/src/ast/ast.h411
-rw-r--r--deps/v8/src/ast/modules.cc1
-rw-r--r--deps/v8/src/ast/modules.h2
-rw-r--r--deps/v8/src/ast/prettyprinter.cc10
-rw-r--r--deps/v8/src/ast/scopes.cc163
-rw-r--r--deps/v8/src/ast/scopes.h26
-rw-r--r--deps/v8/src/ast/variables.cc5
-rw-r--r--deps/v8/src/background-parsing-task.cc7
-rw-r--r--deps/v8/src/background-parsing-task.h1
-rw-r--r--deps/v8/src/bailout-reason.h7
-rw-r--r--deps/v8/src/base/atomic-utils.h4
-rw-r--r--deps/v8/src/base/hashmap.h28
-rw-r--r--deps/v8/src/base/logging.h22
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc9
-rw-r--r--deps/v8/src/bootstrapper.cc543
-rw-r--r--deps/v8/src/bootstrapper.h2
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc341
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc332
-rw-r--r--deps/v8/src/builtins/builtins-api.cc4
-rw-r--r--deps/v8/src/builtins/builtins-arguments.cc425
-rw-r--r--deps/v8/src/builtins/builtins-arguments.h55
-rw-r--r--deps/v8/src/builtins/builtins-array.cc919
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc5
-rw-r--r--deps/v8/src/builtins/builtins-async-function.cc208
-rw-r--r--deps/v8/src/builtins/builtins-async-iterator.cc326
-rw-r--r--deps/v8/src/builtins/builtins-async.cc92
-rw-r--r--deps/v8/src/builtins/builtins-async.h35
-rw-r--r--deps/v8/src/builtins/builtins-boolean.cc2
-rw-r--r--deps/v8/src/builtins/builtins-call.cc14
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc2
-rw-r--r--deps/v8/src/builtins/builtins-constructor.cc69
-rw-r--r--deps/v8/src/builtins/builtins-conversion.cc39
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc13
-rw-r--r--deps/v8/src/builtins/builtins-date.cc24
-rw-r--r--deps/v8/src/builtins/builtins-debug.cc11
-rw-r--r--deps/v8/src/builtins/builtins-error.cc2
-rw-r--r--deps/v8/src/builtins/builtins-function.cc62
-rw-r--r--deps/v8/src/builtins/builtins-generator.cc127
-rw-r--r--deps/v8/src/builtins/builtins-global.cc9
-rw-r--r--deps/v8/src/builtins/builtins-handler.cc243
-rw-r--r--deps/v8/src/builtins/builtins-ic.cc101
-rw-r--r--deps/v8/src/builtins/builtins-internal.cc15
-rw-r--r--deps/v8/src/builtins/builtins-interpreter.cc67
-rw-r--r--deps/v8/src/builtins/builtins-json.cc2
-rw-r--r--deps/v8/src/builtins/builtins-math.cc2
-rw-r--r--deps/v8/src/builtins/builtins-number.cc17
-rw-r--r--deps/v8/src/builtins/builtins-object.cc119
-rw-r--r--deps/v8/src/builtins/builtins-object.h26
-rw-r--r--deps/v8/src/builtins/builtins-promise.cc410
-rw-r--r--deps/v8/src/builtins/builtins-promise.h27
-rw-r--r--deps/v8/src/builtins/builtins-proxy.cc3
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc4
-rw-r--r--deps/v8/src/builtins/builtins-regexp.cc767
-rw-r--r--deps/v8/src/builtins/builtins-regexp.h99
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer.cc10
-rw-r--r--deps/v8/src/builtins/builtins-string.cc1000
-rw-r--r--deps/v8/src/builtins/builtins-symbol.cc2
-rw-r--r--deps/v8/src/builtins/builtins-typedarray.cc272
-rw-r--r--deps/v8/src/builtins/builtins-utils.h8
-rw-r--r--deps/v8/src/builtins/builtins-wasm.cc30
-rw-r--r--deps/v8/src/builtins/builtins.cc13
-rw-r--r--deps/v8/src/builtins/builtins.h331
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc364
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc330
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc331
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc346
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc333
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc366
-rw-r--r--deps/v8/src/builtins/x87/builtins-x87.cc261
-rw-r--r--deps/v8/src/code-factory.cc264
-rw-r--r--deps/v8/src/code-factory.h47
-rw-r--r--deps/v8/src/code-stub-assembler.cc1664
-rw-r--r--deps/v8/src/code-stub-assembler.h217
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc207
-rw-r--r--deps/v8/src/code-stubs.cc407
-rw-r--r--deps/v8/src/code-stubs.h404
-rw-r--r--deps/v8/src/codegen.cc7
-rw-r--r--deps/v8/src/compilation-cache.cc158
-rw-r--r--deps/v8/src/compilation-cache.h64
-rw-r--r--deps/v8/src/compilation-info.cc21
-rw-r--r--deps/v8/src/compilation-info.h16
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc147
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h35
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc24
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h4
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc95
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.h32
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc30
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h5
-rw-r--r--deps/v8/src/compiler.cc571
-rw-r--r--deps/v8/src/compiler.h14
-rw-r--r--deps/v8/src/compiler/access-builder.cc28
-rw-r--r--deps/v8/src/compiler/access-builder.h10
-rw-r--r--deps/v8/src/compiler/access-info.cc45
-rw-r--r--deps/v8/src/compiler/access-info.h12
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc394
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h77
-rw-r--r--deps/v8/src/compiler/arm/instruction-scheduler-arm.cc75
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc607
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc53
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc438
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc220
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h13
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc7
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc5
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc402
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h40
-rw-r--r--deps/v8/src/compiler/code-assembler.cc42
-rw-r--r--deps/v8/src/compiler/code-assembler.h14
-rw-r--r--deps/v8/src/compiler/code-generator-impl.h8
-rw-r--r--deps/v8/src/compiler/code-generator.cc91
-rw-r--r--deps/v8/src/compiler/code-generator.h14
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc96
-rw-r--r--deps/v8/src/compiler/common-operator.cc201
-rw-r--r--deps/v8/src/compiler/common-operator.h22
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc221
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h6
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc4
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h1
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc25
-rw-r--r--deps/v8/src/compiler/escape-analysis.h4
-rw-r--r--deps/v8/src/compiler/frame-elider.cc3
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc20
-rw-r--r--deps/v8/src/compiler/graph-assembler.h2
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc11
-rw-r--r--deps/v8/src/compiler/graph.h53
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc527
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc394
-rw-r--r--deps/v8/src/compiler/instruction-codes.h10
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h30
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc407
-rw-r--r--deps/v8/src/compiler/instruction-selector.h30
-rw-r--r--deps/v8/src/compiler/instruction.cc22
-rw-r--r--deps/v8/src/compiler/instruction.h27
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc7
-rw-r--r--deps/v8/src/compiler/int64-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc139
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h1
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc342
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h23
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc63
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h12
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc110
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.cc294
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.h65
-rw-r--r--deps/v8/src/compiler/js-graph.cc9
-rw-r--r--deps/v8/src/compiler/js-graph.h2
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc61
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h5
-rw-r--r--deps/v8/src/compiler/js-inlining.cc332
-rw-r--r--deps/v8/src/compiler/js-inlining.h11
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc105
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h18
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc610
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h33
-rw-r--r--deps/v8/src/compiler/js-operator.cc261
-rw-r--r--deps/v8/src/compiler/js-operator.h199
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc153
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.h54
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc406
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h8
-rw-r--r--deps/v8/src/compiler/jump-threading.cc1
-rw-r--r--deps/v8/src/compiler/linkage.cc11
-rw-r--r--deps/v8/src/compiler/load-elimination.cc6
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.cc6
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc10
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc31
-rw-r--r--deps/v8/src/compiler/machine-operator.cc146
-rw-r--r--deps/v8/src/compiler/machine-operator.h72
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc47
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc46
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc47
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc49
-rw-r--r--deps/v8/src/compiler/node-matchers.h48
-rw-r--r--deps/v8/src/compiler/node-properties.cc107
-rw-r--r--deps/v8/src/compiler/node-properties.h15
-rw-r--r--deps/v8/src/compiler/node.cc51
-rw-r--r--deps/v8/src/compiler/node.h4
-rw-r--r--deps/v8/src/compiler/opcodes.h73
-rw-r--r--deps/v8/src/compiler/operation-typer.cc23
-rw-r--r--deps/v8/src/compiler/operator-properties.cc11
-rw-r--r--deps/v8/src/compiler/pipeline.cc58
-rw-r--r--deps/v8/src/compiler/pipeline.h3
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc48
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc24
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc13
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h9
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.cc1
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc4
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.h3
-rw-r--r--deps/v8/src/compiler/register-allocator.cc17
-rw-r--r--deps/v8/src/compiler/representation-change.cc20
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc596
-rw-r--r--deps/v8/src/compiler/s390/instruction-codes-s390.h7
-rw-r--r--deps/v8/src/compiler/s390/instruction-scheduler-s390.cc7
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc992
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc1
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc138
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc6
-rw-r--r--deps/v8/src/compiler/simplified-operator.h6
-rw-r--r--deps/v8/src/compiler/state-values-utils.cc36
-rw-r--r--deps/v8/src/compiler/state-values-utils.h8
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc14
-rw-r--r--deps/v8/src/compiler/typed-optimization.h1
-rw-r--r--deps/v8/src/compiler/typer.cc132
-rw-r--r--deps/v8/src/compiler/typer.h2
-rw-r--r--deps/v8/src/compiler/types.cc21
-rw-r--r--deps/v8/src/compiler/types.h130
-rw-r--r--deps/v8/src/compiler/verifier.cc50
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc599
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h10
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc1
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc97
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc359
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc220
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc127
-rw-r--r--deps/v8/src/contexts-inl.h1
-rw-r--r--deps/v8/src/contexts.cc36
-rw-r--r--deps/v8/src/contexts.h59
-rw-r--r--deps/v8/src/conversions.cc1
-rw-r--r--deps/v8/src/conversions.h3
-rw-r--r--deps/v8/src/counters.h10
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc44
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc44
-rw-r--r--deps/v8/src/crankshaft/compilation-phase.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bce.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-canonicalize.cc2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-check-elimination.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-dce.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-dehoist.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-environment-liveness.cc2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-escape-analysis.cc4
-rw-r--r--deps/v8/src/crankshaft/hydrogen-gvn.cc29
-rw-r--r--deps/v8/src/crankshaft/hydrogen-infer-representation.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-infer-types.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.cc23
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.h9
-rw-r--r--deps/v8/src/crankshaft/hydrogen-load-elimination.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-osr.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-range-analysis.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-redundant-phi.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-removable-simulates.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-representation-changes.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-sce.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-store-elimination.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc1
-rw-r--r--deps/v8/src/crankshaft/hydrogen.cc226
-rw-r--r--deps/v8/src/crankshaft/hydrogen.h66
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc42
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.cc3
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.cc5
-rw-r--r--deps/v8/src/crankshaft/lithium.cc1
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc46
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc47
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc50
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc58
-rw-r--r--deps/v8/src/crankshaft/typing.cc13
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc48
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc1
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.cc1
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc33
-rw-r--r--deps/v8/src/d8.cc155
-rw-r--r--deps/v8/src/d8.h68
-rw-r--r--deps/v8/src/dateparser-inl.h9
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc71
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc70
-rw-r--r--deps/v8/src/debug/debug-coverage.cc169
-rw-r--r--deps/v8/src/debug/debug-coverage.h53
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc74
-rw-r--r--deps/v8/src/debug/debug-evaluate.h6
-rw-r--r--deps/v8/src/debug/debug-frames.cc29
-rw-r--r--deps/v8/src/debug/debug-frames.h8
-rw-r--r--deps/v8/src/debug/debug-interface.h176
-rw-r--r--deps/v8/src/debug/debug-scopes.cc8
-rw-r--r--deps/v8/src/debug/debug.cc1375
-rw-r--r--deps/v8/src/debug/debug.h385
-rw-r--r--deps/v8/src/debug/debug.js1367
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc69
-rw-r--r--deps/v8/src/debug/interface-types.h6
-rw-r--r--deps/v8/src/debug/liveedit.cc381
-rw-r--r--deps/v8/src/debug/liveedit.h49
-rw-r--r--deps/v8/src/debug/liveedit.js2
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc70
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc71
-rw-r--r--deps/v8/src/debug/mirrors.js695
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc73
-rw-r--r--deps/v8/src/debug/s390/debug-s390.cc70
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc79
-rw-r--r--deps/v8/src/deoptimizer.cc184
-rw-r--r--deps/v8/src/deoptimizer.h72
-rw-r--r--deps/v8/src/disassembler.cc7
-rw-r--r--deps/v8/src/eh-frame.h1
-rw-r--r--deps/v8/src/elements.cc13
-rw-r--r--deps/v8/src/execution.cc3
-rw-r--r--deps/v8/src/execution.h18
-rw-r--r--deps/v8/src/external-reference-table.cc11
-rw-r--r--deps/v8/src/external-reference-table.h2
-rw-r--r--deps/v8/src/factory.cc198
-rw-r--r--deps/v8/src/factory.h67
-rw-r--r--deps/v8/src/fast-accessor-assembler.cc10
-rw-r--r--deps/v8/src/fast-accessor-assembler.h5
-rw-r--r--deps/v8/src/feedback-vector-inl.h172
-rw-r--r--deps/v8/src/feedback-vector.cc385
-rw-r--r--deps/v8/src/feedback-vector.h427
-rw-r--r--deps/v8/src/ffi/OWNERS2
-rw-r--r--deps/v8/src/ffi/ffi-compiler.cc128
-rw-r--r--deps/v8/src/ffi/ffi-compiler.h37
-rw-r--r--deps/v8/src/field-type.h4
-rw-r--r--deps/v8/src/find-and-replace-pattern.h37
-rw-r--r--deps/v8/src/flag-definitions.h352
-rw-r--r--deps/v8/src/flags.cc50
-rw-r--r--deps/v8/src/frames-inl.h12
-rw-r--r--deps/v8/src/frames.cc138
-rw-r--r--deps/v8/src/frames.h58
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc84
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc81
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc75
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h37
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc86
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc74
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc74
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc80
-rw-r--r--deps/v8/src/full-codegen/s390/full-codegen-s390.cc77
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc81
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc66
-rw-r--r--deps/v8/src/futex-emulation.cc7
-rw-r--r--deps/v8/src/futex-emulation.h13
-rw-r--r--deps/v8/src/globals.h84
-rw-r--r--deps/v8/src/handles-inl.h1
-rw-r--r--deps/v8/src/handles.cc3
-rw-r--r--deps/v8/src/handles.h7
-rw-r--r--deps/v8/src/heap-symbols.h110
-rw-r--r--deps/v8/src/heap/array-buffer-tracker-inl.h4
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc5
-rw-r--r--deps/v8/src/heap/gc-tracer.cc15
-rw-r--r--deps/v8/src/heap/gc-tracer.h7
-rw-r--r--deps/v8/src/heap/heap-inl.h20
-rw-r--r--deps/v8/src/heap/heap.cc557
-rw-r--r--deps/v8/src/heap/heap.h138
-rw-r--r--deps/v8/src/heap/incremental-marking.cc99
-rw-r--r--deps/v8/src/heap/incremental-marking.h2
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h42
-rw-r--r--deps/v8/src/heap/mark-compact.cc268
-rw-r--r--deps/v8/src/heap/mark-compact.h91
-rw-r--r--deps/v8/src/heap/object-stats.cc31
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h30
-rw-r--r--deps/v8/src/heap/objects-visiting.cc6
-rw-r--r--deps/v8/src/heap/objects-visiting.h1
-rw-r--r--deps/v8/src/heap/remembered-set.h18
-rw-r--r--deps/v8/src/heap/scavenger.cc39
-rw-r--r--deps/v8/src/heap/spaces-inl.h3
-rw-r--r--deps/v8/src/heap/spaces.cc40
-rw-r--r--deps/v8/src/heap/spaces.h59
-rw-r--r--deps/v8/src/heap/store-buffer.cc1
-rw-r--r--deps/v8/src/heap/store-buffer.h4
-rw-r--r--deps/v8/src/i18n.cc12
-rw-r--r--deps/v8/src/i18n.h4
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h12
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc2
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h11
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc882
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc25
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc2
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc57
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc61
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h21
-rw-r--r--deps/v8/src/ic/access-compiler.cc1
-rw-r--r--deps/v8/src/ic/accessor-assembler-impl.h203
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc833
-rw-r--r--deps/v8/src/ic/accessor-assembler.h285
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc39
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc37
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc44
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc31
-rw-r--r--deps/v8/src/ic/call-optimization.cc2
-rw-r--r--deps/v8/src/ic/handler-compiler.cc5
-rw-r--r--deps/v8/src/ic/handler-compiler.h6
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h11
-rw-r--r--deps/v8/src/ic/handler-configuration.h5
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc49
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc47
-rw-r--r--deps/v8/src/ic/ic-compiler.cc121
-rw-r--r--deps/v8/src/ic/ic-compiler.h40
-rw-r--r--deps/v8/src/ic/ic-inl.h34
-rw-r--r--deps/v8/src/ic/ic-state.cc8
-rw-r--r--deps/v8/src/ic/ic-state.h94
-rw-r--r--deps/v8/src/ic/ic.cc898
-rw-r--r--deps/v8/src/ic/ic.h144
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc255
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc39
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc37
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc39
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc37
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc44
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc37
-rw-r--r--deps/v8/src/ic/s390/handler-compiler-s390.cc42
-rw-r--r--deps/v8/src/ic/s390/ic-s390.cc34
-rw-r--r--deps/v8/src/ic/stub-cache.cc14
-rw-r--r--deps/v8/src/ic/stub-cache.h4
-rw-r--r--deps/v8/src/ic/x64/access-compiler-x64.cc1
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc48
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc48
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc49
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc47
-rw-r--r--deps/v8/src/identity-map.cc232
-rw-r--r--deps/v8/src/identity-map.h119
-rw-r--r--deps/v8/src/inspector/DEPS3
-rw-r--r--deps/v8/src/inspector/debugger-script.js101
-rw-r--r--deps/v8/src/inspector/debugger_script_externs.js111
-rw-r--r--deps/v8/src/inspector/injected-script-source.js11
-rw-r--r--deps/v8/src/inspector/injected_script_externs.js5
-rw-r--r--deps/v8/src/inspector/java-script-call-frame.cc18
-rw-r--r--deps/v8/src/inspector/java-script-call-frame.h6
-rw-r--r--deps/v8/src/inspector/js_protocol.json74
-rw-r--r--deps/v8/src/inspector/string-util.h18
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc37
-rw-r--r--deps/v8/src/inspector/v8-console.cc18
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc364
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h54
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc83
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h10
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc544
-rw-r--r--deps/v8/src/inspector/v8-debugger.h42
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.cc13
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.h2
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h1
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc5
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc142
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.h19
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc1
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc31
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h3
-rw-r--r--deps/v8/src/inspector/wasm-translation.cc96
-rw-r--r--deps/v8/src/interface-descriptors.cc62
-rw-r--r--deps/v8/src/interface-descriptors.h176
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc5
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc240
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h115
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc24
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.cc32
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.cc1
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc533
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h40
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc16
-rw-r--r--deps/v8/src/interpreter/bytecodes.h54
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc163
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h119
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.h3
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc210
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h70
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.cc106
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h36
-rw-r--r--deps/v8/src/interpreter/interpreter.cc418
-rw-r--r--deps/v8/src/interpreter/interpreter.h6
-rw-r--r--deps/v8/src/isolate-inl.h19
-rw-r--r--deps/v8/src/isolate.cc138
-rw-r--r--deps/v8/src/isolate.h68
-rw-r--r--deps/v8/src/js/array.js52
-rw-r--r--deps/v8/src/js/async-await.js143
-rw-r--r--deps/v8/src/js/datetime-format-to-parts.js16
-rw-r--r--deps/v8/src/js/harmony-atomics.js11
-rw-r--r--deps/v8/src/js/harmony-simd.js923
-rw-r--r--deps/v8/src/js/i18n.js71
-rw-r--r--deps/v8/src/js/icu-case-mapping.js24
-rw-r--r--deps/v8/src/js/macros.py11
-rw-r--r--deps/v8/src/js/prologue.js3
-rw-r--r--deps/v8/src/js/promise.js13
-rw-r--r--deps/v8/src/js/string.js240
-rw-r--r--deps/v8/src/js/typedarray.js38
-rw-r--r--deps/v8/src/json-parser.cc8
-rw-r--r--deps/v8/src/json-stringifier.cc1
-rw-r--r--deps/v8/src/keys.cc53
-rw-r--r--deps/v8/src/keys.h5
-rw-r--r--deps/v8/src/label.h92
-rw-r--r--deps/v8/src/log-utils.h2
-rw-r--r--deps/v8/src/log.cc113
-rw-r--r--deps/v8/src/log.h21
-rw-r--r--deps/v8/src/lookup.cc82
-rw-r--r--deps/v8/src/lookup.h6
-rw-r--r--deps/v8/src/machine-type.cc6
-rw-r--r--deps/v8/src/machine-type.h35
-rw-r--r--deps/v8/src/managed.h81
-rw-r--r--deps/v8/src/map-updater.cc97
-rw-r--r--deps/v8/src/map-updater.h7
-rw-r--r--deps/v8/src/messages.h22
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h13
-rw-r--r--deps/v8/src/mips/assembler-mips.h12
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc806
-rw-r--r--deps/v8/src/mips/codegen-mips.cc21
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc2
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc57
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc68
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h21
-rw-r--r--deps/v8/src/mips/simulator-mips.cc502
-rw-r--r--deps/v8/src/mips/simulator-mips.h43
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h13
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h12
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc818
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc21
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc2
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc57
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc69
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h21
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc455
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h57
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h8
-rw-r--r--deps/v8/src/objects-body-descriptors.h2
-rw-r--r--deps/v8/src/objects-debug.cc51
-rw-r--r--deps/v8/src/objects-inl.h526
-rw-r--r--deps/v8/src/objects-printer.cc217
-rw-r--r--deps/v8/src/objects.cc1021
-rw-r--r--deps/v8/src/objects.h732
-rw-r--r--deps/v8/src/objects/literal-objects.cc55
-rw-r--r--deps/v8/src/objects/literal-objects.h67
-rw-r--r--deps/v8/src/objects/regexp-match-info.h76
-rw-r--r--deps/v8/src/objects/scope-info.h5
-rw-r--r--deps/v8/src/parsing/OWNERS1
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.h1
-rw-r--r--deps/v8/src/parsing/parse-info.cc76
-rw-r--r--deps/v8/src/parsing/parse-info.h47
-rw-r--r--deps/v8/src/parsing/parser-base.h1236
-rw-r--r--deps/v8/src/parsing/parser.cc629
-rw-r--r--deps/v8/src/parsing/parser.h130
-rw-r--r--deps/v8/src/parsing/parsing.cc26
-rw-r--r--deps/v8/src/parsing/parsing.h14
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc145
-rw-r--r--deps/v8/src/parsing/preparse-data.cc5
-rw-r--r--deps/v8/src/parsing/preparse-data.h9
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.cc86
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.h57
-rw-r--r--deps/v8/src/parsing/preparser.cc94
-rw-r--r--deps/v8/src/parsing/preparser.h236
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.h4
-rw-r--r--deps/v8/src/parsing/scanner.cc69
-rw-r--r--deps/v8/src/parsing/scanner.h21
-rw-r--r--deps/v8/src/pending-compilation-error-handler.cc2
-rw-r--r--deps/v8/src/pending-compilation-error-handler.h15
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h21
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc17
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h12
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc856
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc26
-rw-r--r--deps/v8/src/ppc/constants-ppc.h2739
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc2
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc35
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc55
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc69
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h18
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc37
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc18
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h2
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc19
-rw-r--r--deps/v8/src/profiler/heap-profiler.h6
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc118
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h7
-rw-r--r--deps/v8/src/profiler/profile-generator.cc15
-rw-r--r--deps/v8/src/profiler/profile-generator.h3
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc1
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.cc19
-rw-r--r--deps/v8/src/property-details.h66
-rw-r--r--deps/v8/src/property.cc31
-rw-r--r--deps/v8/src/property.h27
-rw-r--r--deps/v8/src/regexp/jsregexp-inl.h1
-rw-r--r--deps/v8/src/regexp/jsregexp.cc14
-rw-r--r--deps/v8/src/regexp/jsregexp.h2
-rw-r--r--deps/v8/src/regexp/regexp-ast.cc6
-rw-r--r--deps/v8/src/regexp/regexp-ast.h22
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc7
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc4
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc9
-rw-r--r--deps/v8/src/regexp/regexp-utils.h2
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc2
-rw-r--r--deps/v8/src/runtime-profiler.cc6
-rw-r--r--deps/v8/src/runtime/runtime-array.cc69
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc43
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc48
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc11
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc160
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc17
-rw-r--r--deps/v8/src/runtime/runtime-function.cc1
-rw-r--r--deps/v8/src/runtime/runtime-futex.cc16
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc14
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc61
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc7
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc132
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc3
-rw-r--r--deps/v8/src/runtime/runtime-module.cc9
-rw-r--r--deps/v8/src/runtime/runtime-object.cc51
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc110
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc53
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc64
-rw-r--r--deps/v8/src/runtime/runtime-simd.cc1016
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc221
-rw-r--r--deps/v8/src/runtime/runtime-test.cc36
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc61
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc69
-rw-r--r--deps/v8/src/runtime/runtime.h524
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h13
-rw-r--r--deps/v8/src/s390/assembler-s390.cc547
-rw-r--r--deps/v8/src/s390/assembler-s390.h205
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc841
-rw-r--r--deps/v8/src/s390/codegen-s390.cc28
-rw-r--r--deps/v8/src/s390/constants-s390.h16
-rw-r--r--deps/v8/src/s390/deoptimizer-s390.cc2
-rw-r--r--deps/v8/src/s390/disasm-s390.cc35
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc53
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc292
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h51
-rw-r--r--deps/v8/src/s390/simulator-s390.cc144
-rw-r--r--deps/v8/src/s390/simulator-s390.h1
-rw-r--r--deps/v8/src/signature.h2
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc42
-rw-r--r--deps/v8/src/snapshot/code-serializer.h20
-rw-r--r--deps/v8/src/snapshot/deserializer.cc6
-rw-r--r--deps/v8/src/snapshot/deserializer.h2
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc4
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc3
-rw-r--r--deps/v8/src/snapshot/serializer.cc2
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc1
-rw-r--r--deps/v8/src/source-position-table.h3
-rw-r--r--deps/v8/src/source-position.cc37
-rw-r--r--deps/v8/src/string-builder.h2
-rw-r--r--deps/v8/src/third_party/vtune/BUILD.gn20
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h4
-rw-r--r--deps/v8/src/type-hints.cc6
-rw-r--r--deps/v8/src/type-hints.h6
-rw-r--r--deps/v8/src/type-info.cc93
-rw-r--r--deps/v8/src/type-info.h45
-rw-r--r--deps/v8/src/utils.cc3
-rw-r--r--deps/v8/src/utils.h65
-rw-r--r--deps/v8/src/v8.cc3
-rw-r--r--deps/v8/src/v8.gyp43
-rw-r--r--deps/v8/src/value-serializer.cc26
-rw-r--r--deps/v8/src/value-serializer.h4
-rw-r--r--deps/v8/src/wasm/decoder.h2
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h325
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc163
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h287
-rw-r--r--deps/v8/src/wasm/managed.h56
-rw-r--r--deps/v8/src/wasm/module-decoder.cc97
-rw-r--r--deps/v8/src/wasm/module-decoder.h28
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.cc263
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.h70
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc270
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc421
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h71
-rw-r--r--deps/v8/src/wasm/wasm-js.cc475
-rw-r--r--deps/v8/src/wasm/wasm-limits.h2
-rw-r--r--deps/v8/src/wasm/wasm-macro-gen.h73
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc42
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h6
-rw-r--r--deps/v8/src/wasm/wasm-module.cc1694
-rw-r--r--deps/v8/src/wasm/wasm-module.h125
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc356
-rw-r--r--deps/v8/src/wasm/wasm-objects.h91
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc231
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h233
-rw-r--r--deps/v8/src/wasm/wasm-text.cc120
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h24
-rw-r--r--deps/v8/src/x64/assembler-x64.cc19
-rw-r--r--deps/v8/src/x64/assembler-x64.h27
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc851
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h4
-rw-r--r--deps/v8/src/x64/codegen-x64.cc26
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc3
-rw-r--r--deps/v8/src/x64/eh-frame-x64.cc1
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc55
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc91
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h18
-rw-r--r--deps/v8/src/x87/assembler-x87-inl.h12
-rw-r--r--deps/v8/src/x87/assembler-x87.h11
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc120
-rw-r--r--deps/v8/src/x87/codegen-x87.cc25
-rw-r--r--deps/v8/src/x87/deoptimizer-x87.cc2
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc16
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc38
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h15
-rw-r--r--deps/v8/src/zone/accounting-allocator.cc4
-rw-r--r--deps/v8/src/zone/zone-allocator.h15
-rw-r--r--deps/v8/src/zone/zone.cc9
-rw-r--r--deps/v8/src/zone/zone.h4
727 files changed, 43514 insertions, 39097 deletions
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 218fb3572f..a4d51fd18a 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -8,7 +8,6 @@
#include "include/v8.h"
#include "src/allocation.h"
#include "src/globals.h"
-#include "src/handles.h"
#include "src/property-details.h"
namespace v8 {
@@ -16,6 +15,8 @@ namespace internal {
// Forward declarations.
class AccessorInfo;
+template <typename T>
+class Handle;
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
diff --git a/deps/v8/src/api-experimental.h b/deps/v8/src/api-experimental.h
index bc0bc55739..5b1bc1b04a 100644
--- a/deps/v8/src/api-experimental.h
+++ b/deps/v8/src/api-experimental.h
@@ -5,11 +5,11 @@
#ifndef V8_API_EXPERIMENTAL_H_
#define V8_API_EXPERIMENTAL_H_
-#include "src/handles.h"
-
namespace v8 {
namespace internal {
class Code;
+template <typename T>
+class MaybeHandle;
} // internal;
namespace experimental {
class FastAccessorBuilder;
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 87138bd5cf..045ff470ab 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -538,8 +538,6 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
JSFunction::SetInitialMap(object_function, object_map,
isolate->factory()->null_value());
object_map->set_is_access_check_needed(true);
- object_map->set_is_callable();
- object_map->set_is_constructor(true);
Handle<JSObject> object = isolate->factory()->NewJSObject(object_function);
JSObject::ForceSetPrototype(object, isolate->factory()->null_value());
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 446b008a55..beefd61bdb 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -35,6 +35,7 @@
#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/counters.h"
+#include "src/debug/debug-coverage.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
@@ -47,6 +48,7 @@
#include "src/json-parser.h"
#include "src/json-stringifier.h"
#include "src/messages.h"
+#include "src/objects-inl.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
@@ -283,7 +285,8 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
v8::Integer::New(v8_isolate, script->id()),
Utils::ToLocal(source_map_url),
v8::Boolean::New(v8_isolate, options.IsOpaque()),
- v8::Boolean::New(v8_isolate, script->type() == i::Script::TYPE_WASM));
+ v8::Boolean::New(v8_isolate, script->type() == i::Script::TYPE_WASM),
+ v8::Boolean::New(v8_isolate, options.IsModule()));
return origin;
}
@@ -2103,8 +2106,7 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
}
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
- Isolate* v8_isolate, Source* source, CompileOptions options,
- bool is_module) {
+ Isolate* v8_isolate, Source* source, CompileOptions options) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, ScriptCompiler, CompileUnbound,
UnboundScript);
@@ -2149,7 +2151,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
result = i::Compiler::GetSharedFunctionInfoForScript(
str, name_obj, line_offset, column_offset, source->resource_options,
source_map_url, isolate->native_context(), NULL, &script_data, options,
- i::NOT_NATIVES_CODE, is_module);
+ i::NOT_NATIVES_CODE);
has_pending_exception = result.is_null();
if (has_pending_exception && script_data != NULL) {
// This case won't happen during normal operation; we have compiled
@@ -2178,24 +2180,34 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundScript(
Isolate* v8_isolate, Source* source, CompileOptions options) {
- return CompileUnboundInternal(v8_isolate, source, options, false);
+ Utils::ApiCheck(
+ !source->GetResourceOptions().IsModule(),
+ "v8::ScriptCompiler::CompileUnboundScript",
+ "v8::ScriptCompiler::CompileModule must be used to compile modules");
+ return CompileUnboundInternal(v8_isolate, source, options);
}
Local<UnboundScript> ScriptCompiler::CompileUnbound(Isolate* v8_isolate,
Source* source,
CompileOptions options) {
- RETURN_TO_LOCAL_UNCHECKED(
- CompileUnboundInternal(v8_isolate, source, options, false),
- UnboundScript);
+ Utils::ApiCheck(
+ !source->GetResourceOptions().IsModule(),
+ "v8::ScriptCompiler::CompileUnbound",
+ "v8::ScriptCompiler::CompileModule must be used to compile modules");
+ RETURN_TO_LOCAL_UNCHECKED(CompileUnboundInternal(v8_isolate, source, options),
+ UnboundScript);
}
MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
Source* source,
CompileOptions options) {
+ Utils::ApiCheck(
+ !source->GetResourceOptions().IsModule(), "v8::ScriptCompiler::Compile",
+ "v8::ScriptCompiler::CompileModule must be used to compile modules");
auto isolate = context->GetIsolate();
- auto maybe = CompileUnboundInternal(isolate, source, options, false);
+ auto maybe = CompileUnboundInternal(isolate, source, options);
Local<UnboundScript> result;
if (!maybe.ToLocal(&result)) return MaybeLocal<Script>();
v8::Context::Scope scope(context);
@@ -2215,7 +2227,10 @@ MaybeLocal<Module> ScriptCompiler::CompileModule(Isolate* isolate,
Source* source) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions, true);
+ Utils::ApiCheck(source->GetResourceOptions().IsModule(),
+ "v8::ScriptCompiler::CompileModule",
+ "Invalid ScriptOrigin: is_module must be true");
+ auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions);
Local<UnboundScript> unbound;
if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
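The ApiCheck hunks above split the script and module compile paths: a Source whose ScriptOrigin carries is_module == true must go through CompileModule, and the script entry points now reject it. A minimal embedder-side sketch of the new contract; the isolate, name and source_text handles are assumed:

    // The trailing ScriptOrigin argument is the new is_module flag.
    v8::ScriptOrigin origin(
        name, v8::Integer::New(isolate, 0), v8::Integer::New(isolate, 0),
        v8::False(isolate), v8::Local<v8::Integer>(), v8::Local<v8::Value>(),
        v8::False(isolate), v8::False(isolate), v8::True(isolate));
    v8::ScriptCompiler::Source source(source_text, origin);
    // CompileModule ApiChecks that is_module is true; Compile,
    // CompileUnbound and CompileUnboundScript ApiCheck that it is false.
    v8::MaybeLocal<v8::Module> module =
        v8::ScriptCompiler::CompileModule(isolate, &source);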
@@ -2271,9 +2286,14 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Function);
TRACE_EVENT0("v8", "V8.ScriptCompiler");
i::Handle<i::String> source_string;
+ int parameters_end_pos = i::kNoSourcePosition;
auto factory = isolate->factory();
if (arguments_count) {
- source_string = factory->NewStringFromStaticChars("(function(");
+ if (i::FLAG_harmony_function_tostring) {
+ source_string = factory->NewStringFromStaticChars("(function anonymous(");
+ } else {
+ source_string = factory->NewStringFromStaticChars("(function(");
+ }
for (size_t i = 0; i < arguments_count; ++i) {
IsIdentifierHelper helper;
if (!helper.Check(*Utils::OpenHandle(*arguments[i]))) {
@@ -2291,12 +2311,24 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
',')).ToHandle(&source_string);
RETURN_ON_FAILED_EXECUTION(Function);
}
- auto brackets = factory->NewStringFromStaticChars("){");
+ i::Handle<i::String> brackets;
+ if (i::FLAG_harmony_function_tostring) {
+ brackets = factory->NewStringFromStaticChars("\n) {");
+ parameters_end_pos = source_string->length() - 3;
+ } else {
+ brackets = factory->NewStringFromStaticChars("){");
+ }
has_pending_exception = !factory->NewConsString(source_string, brackets)
.ToHandle(&source_string);
RETURN_ON_FAILED_EXECUTION(Function);
} else {
- source_string = factory->NewStringFromStaticChars("(function(){");
+ if (i::FLAG_harmony_function_tostring) {
+ source_string =
+ factory->NewStringFromStaticChars("(function anonymous(\n) {");
+ parameters_end_pos = source_string->length() - 3;
+ } else {
+ source_string = factory->NewStringFromStaticChars("(function(){");
+ }
}
int scope_position = source_string->length();
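From the embedder's side, this hunk only changes the synthesized source text: with --harmony-function-tostring the wrapper becomes "(function anonymous(a,b\n) { ... })" instead of "(function(a,b){ ... })", and parameters_end_pos marks the end of the parameter list so the result stringifies like a Function-constructor function. A sketch of the call whose output is affected; the context, body and argument handles are assumed:

    v8::Local<v8::String> args[] = {arg_a, arg_b};  // handles for "a", "b"
    v8::ScriptCompiler::Source body(body_text);     // handle for "return a + b"
    v8::MaybeLocal<v8::Function> fn =
        v8::ScriptCompiler::CompileFunctionInContext(context, &body, 2, args,
                                                     0, nullptr);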
@@ -2346,9 +2378,9 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
has_pending_exception =
!i::Compiler::GetFunctionFromEval(
source_string, outer_info, context, i::SLOPPY,
- i::ONLY_SINGLE_FUNCTION_LITERAL, eval_scope_position, eval_position,
- line_offset, column_offset - scope_position, name_obj,
- source->resource_options)
+ i::ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos,
+ eval_scope_position, eval_position, line_offset,
+ column_offset - scope_position, name_obj, source->resource_options)
.ToHandle(&fun);
if (has_pending_exception) {
isolate->ReportPendingMessages();
@@ -2415,12 +2447,19 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
}
source->info->set_script(script);
+ if (source->info->literal() == nullptr) {
+ source->parser->ReportErrors(isolate, script);
+ }
+ source->parser->UpdateStatistics(isolate, script);
- // Do the parsing tasks which need to be done on the main thread. This will
- // also handle parse errors.
- source->parser->Internalize(isolate, script,
- source->info->literal() == nullptr);
- source->parser->HandleSourceURLComments(isolate, script);
+ i::DeferredHandleScope deferred_handle_scope(isolate);
+ {
+ // Internalize AST values on the main thread.
+ source->info->ReopenHandlesInNewHandleScope();
+ source->info->ast_value_factory()->Internalize(isolate);
+ source->parser->HandleSourceURLComments(isolate, script);
+ }
+ source->info->set_deferred_handles(deferred_handle_scope.Detach());
i::Handle<i::SharedFunctionInfo> result;
if (source->info->literal() != nullptr) {
@@ -2985,7 +3024,7 @@ Local<Value> NativeWeakMap::Get(Local<Value> v8_key) {
bool NativeWeakMap::Has(Local<Value> v8_key) {
i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
i::Isolate* isolate = weak_collection->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
if (!key->IsJSReceiver() && !key->IsSymbol()) {
@@ -3006,7 +3045,7 @@ bool NativeWeakMap::Has(Local<Value> v8_key) {
bool NativeWeakMap::Delete(Local<Value> v8_key) {
i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
i::Isolate* isolate = weak_collection->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
if (!key->IsJSReceiver() && !key->IsSymbol()) {
@@ -3234,9 +3273,10 @@ Maybe<bool> ValueDeserializer::ReadHeader(Local<Context> context) {
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
DCHECK(read_header);
- static const uint32_t kMinimumNonLegacyVersion = 13;
- if (GetWireFormatVersion() < kMinimumNonLegacyVersion &&
- !private_->supports_legacy_wire_format) {
+ // TODO(jbroman): Today, all wire formats are "legacy". When a more supported
+ // format is added, compare the version of the internal serializer to the
+ // minimum non-legacy version number.
+ if (!private_->supports_legacy_wire_format) {
isolate->Throw(*isolate->factory()->NewError(
i::MessageTemplate::kDataCloneDeserializationVersionError));
has_pending_exception = true;
@@ -4062,7 +4102,7 @@ bool Value::SameValue(Local<Value> that) const {
Local<String> Value::TypeOf(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
LOG_API(isolate, Value, TypeOf);
return Utils::ToLocal(i::Object::TypeOf(isolate, Utils::OpenHandle(this)));
}
@@ -4465,12 +4505,11 @@ bool v8::Object::SetPrototype(Local<Value> value) {
return SetPrototype(context, value).FromMaybe(false);
}
-
Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Local<FunctionTemplate> tmpl) {
- auto isolate = Utils::OpenHandle(this)->GetIsolate();
- i::PrototypeIterator iter(isolate, *Utils::OpenHandle(this),
- i::kStartAtReceiver);
+ auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
+ i::PrototypeIterator iter(isolate, *self, i::kStartAtReceiver);
auto tmpl_info = *Utils::OpenHandle(*tmpl);
while (!tmpl_info->IsTemplateFor(iter.GetCurrent<i::JSObject>())) {
iter.Advance();
@@ -4678,7 +4717,7 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
has_pending_exception =
!i::JSObject::SetAccessor(obj, info).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- if (result->IsUndefined(obj->GetIsolate())) return Nothing<bool>();
+ if (result->IsUndefined(obj->GetIsolate())) return Just(false);
if (fast) {
i::JSObject::MigrateSlowToFast(obj, 0, "APISetAccessor");
}
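A hedged note on the Just(false) change above: ObjectSetAccessor backs v8::Object::SetAccessor, so a rejected but non-throwing installation is now reported as Just(false) instead of an empty Maybe, letting callers tell the two failure modes apart. Sketch, with obj, context, name and Getter (an AccessorNameGetterCallback) assumed:

    v8::Maybe<bool> installed = obj->SetAccessor(context, name, Getter);
    if (installed.IsNothing()) {
      // An exception is pending.
    } else if (!installed.FromJust()) {
      // The accessor could not be installed; previously this case was
      // indistinguishable from the exception case.
    }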
@@ -4959,8 +4998,7 @@ Local<v8::Object> v8::Object::Clone() {
Local<v8::Context> v8::Object::CreationContext() {
auto self = Utils::OpenHandle(this);
- auto context = handle(self->GetCreationContext());
- return Utils::ToLocal(context);
+ return Utils::ToLocal(self->GetCreationContext());
}
@@ -5804,7 +5842,7 @@ int String::WriteUtf8(char* buffer,
i::Handle<i::String> str = Utils::OpenHandle(this);
i::Isolate* isolate = str->GetIsolate();
LOG_API(isolate, String, WriteUtf8);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (options & HINT_MANY_WRITES_EXPECTED) {
str = i::String::Flatten(str); // Flatten the string for efficiency.
}
@@ -5856,7 +5894,7 @@ static inline int WriteHelper(const String* string,
int options) {
i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
LOG_API(isolate, String, Write);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
DCHECK(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(string);
if (options & String::HINT_MANY_WRITES_EXPECTED) {
@@ -6542,10 +6580,13 @@ bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
return true;
}
if (obj->IsJSGlobalProxy()) {
- // If it's a global proxy object, then test with the global object.
+ // If it's a global proxy, then test with the global object. Note that the
+ // inner global object may not necessarily be a JSGlobalObject.
i::PrototypeIterator iter(i::JSObject::cast(*obj)->map());
- if (iter.IsAtEnd()) return false;
- return self->IsTemplateFor(iter.GetCurrent<i::JSGlobalObject>());
+ // The global proxy should always have a prototype, as it is a bug to call
+ // this on a detached JSGlobalProxy.
+ DCHECK(!iter.IsAtEnd());
+ return self->IsTemplateFor(iter.GetCurrent<i::JSObject>());
}
return false;
}
@@ -6723,11 +6764,17 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
LOG_API(i_isolate, String, NewExternalTwoByte);
- i::Handle<i::String> string = i_isolate->factory()
- ->NewExternalStringFromTwoByte(resource)
- .ToHandleChecked();
- i_isolate->heap()->RegisterExternalString(*string);
- return Utils::ToLocal(string);
+ if (resource->length() > 0) {
+ i::Handle<i::String> string = i_isolate->factory()
+ ->NewExternalStringFromTwoByte(resource)
+ .ToHandleChecked();
+ i_isolate->heap()->RegisterExternalString(*string);
+ return Utils::ToLocal(string);
+ } else {
+ // The resource isn't going to be used, free it immediately.
+ resource->Dispose();
+ return Utils::ToLocal(i_isolate->factory()->empty_string());
+ }
}
@@ -6747,11 +6794,17 @@ MaybeLocal<String> v8::String::NewExternalOneByte(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
LOG_API(i_isolate, String, NewExternalOneByte);
- i::Handle<i::String> string = i_isolate->factory()
- ->NewExternalStringFromOneByte(resource)
- .ToHandleChecked();
- i_isolate->heap()->RegisterExternalString(*string);
- return Utils::ToLocal(string);
+ if (resource->length() > 0) {
+ i::Handle<i::String> string = i_isolate->factory()
+ ->NewExternalStringFromOneByte(resource)
+ .ToHandleChecked();
+ i_isolate->heap()->RegisterExternalString(*string);
+ return Utils::ToLocal(string);
+ } else {
+ // The resource isn't going to be used, free it immediately.
+ resource->Dispose();
+ return Utils::ToLocal(i_isolate->factory()->empty_string());
+ }
}
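Both NewExternal* hunks add the same zero-length special case: the resource is disposed inside the call and the canonical empty string is returned. A sketch of the embedder-visible consequence; EmptyResource is an assumed subclass of v8::String::ExternalOneByteStringResource whose length() is 0:

    auto* resource = new EmptyResource();
    v8::MaybeLocal<v8::String> str =
        v8::String::NewExternalOneByte(isolate, resource);
    // resource->Dispose() has already run here; str is the empty string,
    // so the resource must not be touched again.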
@@ -7081,7 +7134,7 @@ void Map::Clear() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
LOG_API(isolate, Map, Clear);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::JSMap::Clear(self);
}
@@ -7140,15 +7193,14 @@ Maybe<bool> Map::Delete(Local<Context> context, Local<Value> key) {
return Just(result->IsTrue(isolate));
}
-
-Local<Array> Map::AsArray() const {
- i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
+namespace {
+i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
+ int offset, int kind) {
i::Factory* factory = isolate->factory();
- LOG_API(isolate, Map, AsArray);
- ENTER_V8(isolate);
- i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(obj->table()));
- int length = table->NumberOfElements() * 2;
+ i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(table_obj));
+ if (offset >= table->NumberOfElements()) return factory->NewJSArray(0);
+ int length = (table->NumberOfElements() - offset) *
+ (kind == i::JSMapIterator::kKindEntries ? 2 : 1);
i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
int result_index = 0;
{
@@ -7158,15 +7210,30 @@ Local<Array> Map::AsArray() const {
for (int i = 0; i < capacity; ++i) {
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
- result->set(result_index++, key);
- result->set(result_index++, table->ValueAt(i));
+ if (offset-- > 0) continue;
+ if (kind == i::JSMapIterator::kKindEntries ||
+ kind == i::JSMapIterator::kKindKeys) {
+ result->set(result_index++, key);
+ }
+ if (kind == i::JSMapIterator::kKindEntries ||
+ kind == i::JSMapIterator::kKindValues) {
+ result->set(result_index++, table->ValueAt(i));
+ }
}
}
DCHECK_EQ(result_index, result->length());
DCHECK_EQ(result_index, length);
- i::Handle<i::JSArray> result_array =
- factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
- return Utils::ToLocal(result_array);
+ return factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
+}
+} // namespace
+
+Local<Array> Map::AsArray() const {
+ i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
+ i::Isolate* isolate = obj->GetIsolate();
+ LOG_API(isolate, Map, AsArray);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ return Utils::ToLocal(
+ MapAsArray(isolate, obj->table(), 0, i::JSMapIterator::kKindEntries));
}
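The refactor above is behavior-preserving for Map::AsArray, which still returns a flat [key0, value0, key1, value1, ...] array; the walk now lives in the MapAsArray helper, parameterized by offset and iterator kind so map iterators can reuse it. Typical consumption, unchanged by this patch (map and context assumed):

    v8::Local<v8::Array> entries = map->AsArray();
    for (uint32_t i = 0; i + 1 < entries->Length(); i += 2) {
      v8::Local<v8::Value> key = entries->Get(context, i).ToLocalChecked();
      v8::Local<v8::Value> value =
          entries->Get(context, i + 1).ToLocalChecked();
      // ... use key and value ...
    }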
@@ -7189,7 +7256,7 @@ void Set::Clear() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
LOG_API(isolate, Set, Clear);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::JSSet::Clear(self);
}
@@ -7232,15 +7299,13 @@ Maybe<bool> Set::Delete(Local<Context> context, Local<Value> key) {
return Just(result->IsTrue(isolate));
}
-
-Local<Array> Set::AsArray() const {
- i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
+namespace {
+i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object* table_obj,
+ int offset) {
i::Factory* factory = isolate->factory();
- LOG_API(isolate, Set, AsArray);
- ENTER_V8(isolate);
- i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(obj->table()));
- int length = table->NumberOfElements();
+ i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(table_obj));
+ int length = table->NumberOfElements() - offset;
+ if (length <= 0) return factory->NewJSArray(0);
i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
int result_index = 0;
{
@@ -7250,14 +7315,22 @@ Local<Array> Set::AsArray() const {
for (int i = 0; i < capacity; ++i) {
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
+ if (offset-- > 0) continue;
result->set(result_index++, key);
}
}
DCHECK_EQ(result_index, result->length());
DCHECK_EQ(result_index, length);
- i::Handle<i::JSArray> result_array =
- factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
- return Utils::ToLocal(result_array);
+ return factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
+}
+} // namespace
+
+Local<Array> Set::AsArray() const {
+ i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
+ i::Isolate* isolate = obj->GetIsolate();
+ LOG_API(isolate, Set, AsArray);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ return Utils::ToLocal(SetAsArray(isolate, obj->table(), 0));
}
@@ -7374,7 +7447,7 @@ bool Promise::HasHandler() {
i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
i::Isolate* isolate = promise->GetIsolate();
LOG_API(isolate, Promise, HasRejectHandler);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (promise->IsJSPromise()) {
i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
return js_promise->has_handler();
@@ -7502,11 +7575,8 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
size_t length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::wasm::ErrorThrower thrower(i_isolate, "WasmCompiledModule::Compile()");
- i::MaybeHandle<i::JSObject> maybe_compiled =
- i::wasm::CreateModuleObjectFromBytes(
- i_isolate, start, start + length, &thrower,
- i::wasm::ModuleOrigin::kWasmOrigin, i::Handle<i::Script>::null(),
- i::Vector<const uint8_t>::empty());
+ i::MaybeHandle<i::JSObject> maybe_compiled = i::wasm::SyncCompile(
+ i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
return Local<WasmCompiledModule>::Cast(
Utils::ToLocal(maybe_compiled.ToHandleChecked()));
@@ -7960,6 +8030,18 @@ v8::Local<v8::Context> Isolate::GetEnteredContext() {
return Utils::ToLocal(i::Handle<i::Context>::cast(last));
}
+v8::Local<v8::Context> Isolate::GetEnteredOrMicrotaskContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Object> last;
+ if (isolate->handle_scope_implementer()
+ ->MicrotaskContextIsLastEnteredContext()) {
+ last = isolate->handle_scope_implementer()->MicrotaskContext();
+ } else {
+ last = isolate->handle_scope_implementer()->LastEnteredContext();
+ }
+ if (last.is_null()) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>::cast(last));
+}
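// Usage sketch (assumption, not part of this patch): when embedder code runs
// inside a microtask there may be no explicitly entered context, so callers
// that used GetEnteredContext() can switch to the variant above:
//   v8::Local<v8::Context> context = isolate->GetEnteredOrMicrotaskContext();
//   if (!context.IsEmpty()) {
//     v8::Context::Scope scope(context);
//     // ... resolve handles against the right context ...
//   }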
v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8125,6 +8207,7 @@ Isolate* Isolate::New(const Isolate::CreateParams& params) {
}
isolate->set_api_external_references(params.external_references);
+ isolate->set_allow_atomics_wait(params.allow_atomics_wait);
SetResourceConstraints(isolate, params.constraints);
// TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(v8_isolate);
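// Embedder-side sketch of the new flag (hypothetical setup code; the
// array_buffer_allocator line assumes the usual default-allocator helper):
//   v8::Isolate::CreateParams params;
//   params.array_buffer_allocator =
//       v8::ArrayBuffer::Allocator::NewDefaultAllocator();
//   params.allow_atomics_wait = false;  // Atomics.wait() will throw
//   v8::Isolate* isolate = v8::Isolate::New(params);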
@@ -8534,10 +8617,14 @@ void Isolate::IsolateInBackgroundNotification() {
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this));
+ bool on_isolate_thread =
+ v8::Locker::IsActive()
+ ? isolate->thread_manager()->IsLockedByCurrentThread()
+ : i::ThreadId::Current().Equals(isolate->thread_id());
+ isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
isolate->allocator()->MemoryPressureNotification(level);
- isolate->compiler_dispatcher()->MemoryPressureNotification(
- level, Locker::IsLocked(this));
+ isolate->compiler_dispatcher()->MemoryPressureNotification(level,
+ on_isolate_thread);
}
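// The on_isolate_thread decision above, spelled out: with an active Locker,
// "on the isolate thread" means the current thread holds the isolate's lock;
// without one it means the current thread id matches the isolate's. Either
// way an embedder can now report pressure from a background thread, e.g.:
//   isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);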
void Isolate::SetRAILMode(RAILMode rail_mode) {
@@ -8885,11 +8972,15 @@ bool Debug::SetDebugEventListener(Isolate* isolate, EventCallback that,
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
i::HandleScope scope(i_isolate);
- i::Handle<i::Object> foreign = i_isolate->factory()->undefined_value();
- if (that != NULL) {
- foreign = i_isolate->factory()->NewForeign(FUNCTION_ADDR(that));
+ if (that == nullptr) {
+ i_isolate->debug()->SetDebugDelegate(nullptr, false);
+ } else {
+ i::Handle<i::Object> i_data = i_isolate->factory()->undefined_value();
+ if (!data.IsEmpty()) i_data = Utils::OpenHandle(*data);
+ i::NativeDebugDelegate* delegate =
+ new i::NativeDebugDelegate(i_isolate, that, i_data);
+ i_isolate->debug()->SetDebugDelegate(delegate, true);
}
- i_isolate->debug()->SetEventListener(foreign, Utils::OpenHandle(*data, true));
return true;
}
@@ -8909,24 +9000,11 @@ bool Debug::CheckDebugBreak(Isolate* isolate) {
return internal_isolate->stack_guard()->CheckDebugBreak();
}
-
void Debug::SetMessageHandler(Isolate* isolate,
- v8::Debug::MessageHandler handler) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
- i_isolate->debug()->SetMessageHandler(handler);
-}
-
-
-void Debug::SendCommand(Isolate* isolate,
- const uint16_t* command,
- int length,
- ClientData* client_data) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->debug()->EnqueueCommandMessage(
- i::Vector<const uint16_t>(command, length), client_data);
-}
+ v8::Debug::MessageHandler handler) {}
+void Debug::SendCommand(Isolate* isolate, const uint16_t* command, int length,
+ ClientData* client_data) {}
MaybeLocal<Value> Debug::Call(Local<Context> context,
v8::Local<v8::Function> fun,
@@ -8947,30 +9025,7 @@ MaybeLocal<Value> Debug::Call(Local<Context> context,
}
-MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
- v8::Local<v8::Value> obj) {
- PREPARE_FOR_EXECUTION(context, Debug, GetMirror, Value);
- i::Debug* isolate_debug = isolate->debug();
- has_pending_exception = !isolate_debug->Load();
- RETURN_ON_FAILED_EXECUTION(Value);
- i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
- auto name = isolate->factory()->NewStringFromStaticChars("MakeMirror");
- auto fun_obj = i::JSReceiver::GetProperty(debug, name).ToHandleChecked();
- auto v8_fun = Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
- const int kArgc = 1;
- v8::Local<v8::Value> argv[kArgc] = {obj};
- Local<Value> result;
- has_pending_exception =
- !v8_fun->Call(context, Utils::ToLocal(debug), kArgc, argv)
- .ToLocal(&result);
- RETURN_ON_FAILED_EXECUTION(Value);
- RETURN_ESCAPED(result);
-}
-
-void Debug::ProcessDebugMessages(Isolate* isolate) {
- reinterpret_cast<i::Isolate*>(isolate)->debug()->ProcessDebugMessages(true);
-}
-
+void Debug::ProcessDebugMessages(Isolate* isolate) {}
Local<Context> Debug::GetDebugContext(Isolate* isolate) {
return debug::GetDebugContext(isolate);
@@ -9012,19 +9067,6 @@ MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
return Utils::ToLocal(result);
}
-bool debug::SetDebugEventListener(Isolate* isolate, debug::EventCallback that,
- Local<Value> data) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
- i::HandleScope scope(i_isolate);
- i::Handle<i::Object> foreign = i_isolate->factory()->undefined_value();
- if (that != NULL) {
- foreign = i_isolate->factory()->NewForeign(FUNCTION_ADDR(that));
- }
- i_isolate->debug()->SetEventListener(foreign, Utils::OpenHandle(*data, true));
- return true;
-}
-
Local<Context> debug::GetDebugContext(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
@@ -9060,6 +9102,12 @@ void debug::ChangeBreakOnException(Isolate* isolate, ExceptionBreakState type) {
type != NoBreakOnException);
}
+void debug::SetBreakPointsActive(Isolate* v8_isolate, bool is_active) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ isolate->debug()->set_break_points_active(is_active);
+}
+
void debug::SetOutOfMemoryCallback(Isolate* isolate,
OutOfMemoryCallback callback, void* data) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -9077,11 +9125,17 @@ void debug::PrepareStep(Isolate* v8_isolate, StepAction action) {
isolate->debug()->PrepareStep(static_cast<i::StepAction>(action));
}
-void debug::ClearStepping(Isolate* v8_isolate) {
+bool debug::HasNonBlackboxedFrameOnStack(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
- // Clear all current stepping setup.
- isolate->debug()->ClearStepping();
+ i::HandleScope scope(isolate);
+ for (i::StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
+ if (!it.is_javascript()) continue;
+ if (!isolate->debug()->IsFrameBlackboxed(it.javascript_frame())) {
+ return true;
+ }
+ }
+ return false;
}
v8::Isolate* debug::Script::GetIsolate() const {
@@ -9175,6 +9229,10 @@ bool debug::Script::IsWasm() const {
return Utils::OpenHandle(this)->type() == i::Script::TYPE_WASM;
}
+bool debug::Script::IsModule() const {
+ return Utils::OpenHandle(this)->origin_options().IsModule();
+}
+
namespace {
int GetSmiValue(i::Handle<i::FixedArray> array, int index) {
return i::Smi::cast(array->get(index))->value();
@@ -9187,8 +9245,9 @@ bool debug::Script::GetPossibleBreakpoints(
CHECK(!start.IsEmpty());
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
- // TODO(clemensh): Return the proper thing once we support wasm breakpoints.
- return false;
+ i::Handle<i::WasmCompiledModule> compiled_module(
+ i::WasmCompiledModule::cast(script->wasm_compiled_module()));
+ return compiled_module->GetPossibleBreakpoints(start, end, locations);
}
i::Script::InitLineEnds(script);
@@ -9259,26 +9318,6 @@ int debug::Script::GetSourcePosition(const debug::Location& location) const {
return std::min(prev_line_offset + column + 1, line_offset);
}
-MaybeLocal<debug::Script> debug::Script::Wrap(v8::Isolate* v8_isolate,
- v8::Local<v8::Object> script) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
- i::HandleScope handle_scope(isolate);
- i::Handle<i::JSReceiver> script_receiver(Utils::OpenHandle(*script));
- if (!script_receiver->IsJSValue()) return MaybeLocal<Script>();
- i::Handle<i::Object> script_value(
- i::Handle<i::JSValue>::cast(script_receiver)->value(), isolate);
- if (!script_value->IsScript()) {
- return MaybeLocal<Script>();
- }
- i::Handle<i::Script> script_obj = i::Handle<i::Script>::cast(script_value);
- if (script_obj->type() != i::Script::TYPE_NORMAL &&
- script_obj->type() != i::Script::TYPE_WASM) {
- return MaybeLocal<Script>();
- }
- return ToApiHandle<debug::Script>(handle_scope.CloseAndEscape(script_obj));
-}
-
debug::WasmScript* debug::WasmScript::Cast(debug::Script* script) {
CHECK(script->IsWasm());
return static_cast<WasmScript*>(script);
@@ -9304,8 +9343,26 @@ int debug::WasmScript::NumImportedFunctions() const {
return static_cast<int>(compiled_module->module()->num_imported_functions);
}
+std::pair<int, int> debug::WasmScript::GetFunctionRange(
+ int function_index) const {
+ i::DisallowHeapAllocation no_gc;
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::WasmCompiledModule* compiled_module =
+ i::WasmCompiledModule::cast(script->wasm_compiled_module());
+ DCHECK_LE(0, function_index);
+ DCHECK_GT(compiled_module->module()->functions.size(), function_index);
+ i::wasm::WasmFunction& func =
+ compiled_module->module()->functions[function_index];
+ DCHECK_GE(i::kMaxInt, func.code_start_offset);
+ DCHECK_GE(i::kMaxInt, func.code_end_offset);
+ return std::make_pair(static_cast<int>(func.code_start_offset),
+ static_cast<int>(func.code_end_offset));
+}
+
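// Caller sketch (hypothetical, not from this patch): the pair maps a wasm
// function index to its [start, end) byte offsets in the module bytes:
//   std::pair<int, int> range = wasm_script->GetFunctionRange(index);
//   int function_body_size = range.second - range.first;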
debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
int function_index) const {
+ i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
@@ -9319,7 +9376,9 @@ debug::Location::Location(int line_number, int column_number)
CHECK(column_number >= 0);
}
-debug::Location::Location() : line_number_(-1), column_number_(-1) {}
+debug::Location::Location()
+ : line_number_(v8::Function::kLineOffsetNotFound),
+ column_number_(v8::Function::kLineOffsetNotFound) {}
int debug::Location::GetLineNumber() const {
CHECK(line_number_ >= 0);
@@ -9369,19 +9428,29 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
result = i::Compiler::GetSharedFunctionInfoForScript(
str, i::Handle<i::Object>(), 0, 0, origin_options,
i::Handle<i::Object>(), isolate->native_context(), NULL, &script_data,
- ScriptCompiler::kNoCompileOptions, i::INSPECTOR_CODE, false);
+ ScriptCompiler::kNoCompileOptions, i::INSPECTOR_CODE);
has_pending_exception = result.is_null();
RETURN_ON_FAILED_EXECUTION(UnboundScript);
}
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
}
-void debug::SetAsyncTaskListener(Isolate* v8_isolate,
- debug::AsyncTaskListener listener,
- void* data) {
+void debug::SetDebugDelegate(Isolate* v8_isolate,
+ debug::DebugDelegate* delegate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ isolate->debug()->SetDebugDelegate(delegate, false);
+}
+
+void debug::ResetBlackboxedStateCache(Isolate* v8_isolate,
+ v8::Local<debug::Script> script) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
- isolate->debug()->SetAsyncTaskListener(listener, data);
+ i::DisallowHeapAllocation no_gc;
+ i::SharedFunctionInfo::ScriptIterator iter(Utils::OpenHandle(*script));
+ while (i::SharedFunctionInfo* info = iter.Next()) {
+ info->set_computed_debug_is_blackboxed(false);
+ }
}
int debug::EstimatedValueSize(Isolate* v8_isolate, v8::Local<v8::Value> value) {
@@ -9393,6 +9462,81 @@ int debug::EstimatedValueSize(Isolate* v8_isolate, v8::Local<v8::Value> value) {
return i::Handle<i::HeapObject>::cast(object)->Size();
}
+v8::MaybeLocal<v8::Array> debug::EntriesPreview(Isolate* v8_isolate,
+ v8::Local<v8::Value> value,
+ bool* is_key_value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ if (value->IsMap()) {
+ *is_key_value = true;
+ return value.As<Map>()->AsArray();
+ }
+ if (value->IsSet()) {
+ *is_key_value = false;
+ return value.As<Set>()->AsArray();
+ }
+
+ i::Handle<i::Object> object = Utils::OpenHandle(*value);
+ if (object->IsJSWeakCollection()) {
+ *is_key_value = object->IsJSWeakMap();
+ return Utils::ToLocal(i::JSWeakCollection::GetEntries(
+ i::Handle<i::JSWeakCollection>::cast(object), 0));
+ }
+ if (object->IsJSMapIterator()) {
+ i::Handle<i::JSMapIterator> iterator =
+ i::Handle<i::JSMapIterator>::cast(object);
+ int iterator_kind = i::Smi::cast(iterator->kind())->value();
+ *is_key_value = iterator_kind == i::JSMapIterator::kKindEntries;
+ if (!iterator->HasMore()) return v8::Array::New(v8_isolate);
+ return Utils::ToLocal(MapAsArray(isolate, iterator->table(),
+ i::Smi::cast(iterator->index())->value(),
+ iterator_kind));
+ }
+ if (object->IsJSSetIterator()) {
+ i::Handle<i::JSSetIterator> it = i::Handle<i::JSSetIterator>::cast(object);
+ *is_key_value = false;
+ if (!it->HasMore()) return v8::Array::New(v8_isolate);
+ return Utils::ToLocal(
+ SetAsArray(isolate, it->table(), i::Smi::cast(it->index())->value()));
+ }
+ return v8::MaybeLocal<v8::Array>();
+}
+
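// Inspector-side sketch (assumed caller): the out-parameter tells the
// consumer how to render the returned array:
//   bool is_key_value = false;
//   v8::Local<v8::Array> entries;
//   if (debug::EntriesPreview(v8_isolate, value, &is_key_value)
//           .ToLocal(&entries)) {
//     // is_key_value: entries alternate [k0, v0, k1, v1, ...] (maps);
//     // otherwise they are plain values (sets and set iterators).
//   }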
+MaybeLocal<debug::Script> debug::GeneratorObject::Script() {
+ i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
+ i::Object* maybe_script = obj->function()->shared()->script();
+ if (!maybe_script->IsScript()) return MaybeLocal<debug::Script>();
+ i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
+ return ToApiHandle<debug::Script>(script);
+}
+
+Local<Function> debug::GeneratorObject::Function() {
+ i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
+ return Utils::ToLocal(handle(obj->function()));
+}
+
+debug::Location debug::GeneratorObject::SuspendedLocation() {
+ i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
+ CHECK(obj->is_suspended());
+ i::Object* maybe_script = obj->function()->shared()->script();
+ if (!maybe_script->IsScript()) return debug::Location();
+ i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
+ i::Script::PositionInfo info;
+ i::Script::GetPositionInfo(script, obj->source_position(), &info,
+ i::Script::WITH_OFFSET);
+ return debug::Location(info.line, info.column);
+}
+
+bool debug::GeneratorObject::IsSuspended() {
+ return Utils::OpenHandle(this)->is_suspended();
+}
+
+v8::Local<debug::GeneratorObject> debug::GeneratorObject::Cast(
+ v8::Local<v8::Value> value) {
+ CHECK(value->IsGeneratorObject());
+ return ToApiHandle<debug::GeneratorObject>(Utils::OpenHandle(*value));
+}
+
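// Debugger-side sketch (assumes `value` was already checked with
// value->IsGeneratorObject(), since Cast() CHECKs it):
//   v8::Local<debug::GeneratorObject> gen =
//       debug::GeneratorObject::Cast(value);
//   if (gen->IsSuspended()) {
//     debug::Location loc = gen->SuspendedLocation();
//     int line = loc.GetLineNumber();  // line of the suspended yield point
//   }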
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
@@ -9410,6 +9554,56 @@ Local<String> CpuProfileNode::GetFunctionName() const {
}
}
+debug::Coverage::FunctionData::FunctionData(i::CoverageFunction* function,
+ Local<debug::Script> script)
+ : function_(function) {
+ i::Handle<i::Script> i_script = v8::Utils::OpenHandle(*script);
+ i::Script::PositionInfo start;
+ i::Script::PositionInfo end;
+ i::Script::GetPositionInfo(i_script, function->start, &start,
+ i::Script::WITH_OFFSET);
+ i::Script::GetPositionInfo(i_script, function->end, &end,
+ i::Script::WITH_OFFSET);
+ start_ = Location(start.line, start.column);
+ end_ = Location(end.line, end.column);
+}
+
+uint32_t debug::Coverage::FunctionData::Count() { return function_->count; }
+
+MaybeLocal<String> debug::Coverage::FunctionData::Name() {
+ return ToApiHandle<String>(function_->name);
+}
+
+Local<debug::Script> debug::Coverage::ScriptData::GetScript() {
+ return ToApiHandle<debug::Script>(script_->script);
+}
+
+size_t debug::Coverage::ScriptData::FunctionCount() {
+ return script_->functions.size();
+}
+
+debug::Coverage::FunctionData debug::Coverage::ScriptData::GetFunctionData(
+ size_t i) {
+ return FunctionData(&script_->functions.at(i), GetScript());
+}
+
+debug::Coverage::~Coverage() { delete coverage_; }
+
+size_t debug::Coverage::ScriptCount() { return coverage_->size(); }
+
+debug::Coverage::ScriptData debug::Coverage::GetScriptData(size_t i) {
+ return ScriptData(&coverage_->at(i));
+}
+
+debug::Coverage debug::Coverage::Collect(Isolate* isolate, bool reset_count) {
+ return Coverage(i::Coverage::Collect(reinterpret_cast<i::Isolate*>(isolate),
+ reset_count));
+}
+
+void debug::Coverage::TogglePrecise(Isolate* isolate, bool enable) {
+ i::Coverage::TogglePrecise(reinterpret_cast<i::Isolate*>(isolate), enable);
+}
+
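// Collection sketch (hypothetical caller) using the accessors defined above:
//   debug::Coverage coverage = debug::Coverage::Collect(isolate, false);
//   for (size_t i = 0; i < coverage.ScriptCount(); ++i) {
//     debug::Coverage::ScriptData script = coverage.GetScriptData(i);
//     for (size_t j = 0; j < script.FunctionCount(); ++j) {
//       uint32_t count = script.GetFunctionData(j).Count();
//       // count == number of invocations since the last reset
//     }
//   }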
const char* CpuProfileNode::GetFunctionNameStr() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return node->entry()->name();
@@ -9820,6 +10014,11 @@ void HeapProfiler::SetWrapperClassInfoProvider(uint16_t class_id,
callback);
}
+void HeapProfiler::SetGetRetainerInfosCallback(
+ GetRetainerInfosCallback callback) {
+ reinterpret_cast<i::HeapProfiler*>(this)->SetGetRetainerInfosCallback(
+ callback);
+}
size_t HeapProfiler::GetProfilerMemorySize() {
return reinterpret_cast<i::HeapProfiler*>(this)->
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index ce9a6aae8a..8deb117bcc 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -69,47 +69,49 @@ class RegisteredExtension {
static RegisteredExtension* first_extension_;
};
-#define OPEN_HANDLE_LIST(V) \
- V(Template, TemplateInfo) \
- V(FunctionTemplate, FunctionTemplateInfo) \
- V(ObjectTemplate, ObjectTemplateInfo) \
- V(Signature, FunctionTemplateInfo) \
- V(AccessorSignature, FunctionTemplateInfo) \
- V(Data, Object) \
- V(RegExp, JSRegExp) \
- V(Object, JSReceiver) \
- V(Array, JSArray) \
- V(Map, JSMap) \
- V(Set, JSSet) \
- V(ArrayBuffer, JSArrayBuffer) \
- V(ArrayBufferView, JSArrayBufferView) \
- V(TypedArray, JSTypedArray) \
- V(Uint8Array, JSTypedArray) \
- V(Uint8ClampedArray, JSTypedArray) \
- V(Int8Array, JSTypedArray) \
- V(Uint16Array, JSTypedArray) \
- V(Int16Array, JSTypedArray) \
- V(Uint32Array, JSTypedArray) \
- V(Int32Array, JSTypedArray) \
- V(Float32Array, JSTypedArray) \
- V(Float64Array, JSTypedArray) \
- V(DataView, JSDataView) \
- V(SharedArrayBuffer, JSArrayBuffer) \
- V(Name, Name) \
- V(String, String) \
- V(Symbol, Symbol) \
- V(Script, JSFunction) \
- V(UnboundScript, SharedFunctionInfo) \
- V(Module, Module) \
- V(Function, JSReceiver) \
- V(Message, JSMessageObject) \
- V(Context, Context) \
- V(External, Object) \
- V(StackTrace, JSArray) \
- V(StackFrame, JSObject) \
- V(Proxy, JSProxy) \
- V(NativeWeakMap, JSWeakMap) \
- V(debug::Script, Script)
+#define OPEN_HANDLE_LIST(V) \
+ V(Template, TemplateInfo) \
+ V(FunctionTemplate, FunctionTemplateInfo) \
+ V(ObjectTemplate, ObjectTemplateInfo) \
+ V(Signature, FunctionTemplateInfo) \
+ V(AccessorSignature, FunctionTemplateInfo) \
+ V(Data, Object) \
+ V(RegExp, JSRegExp) \
+ V(Object, JSReceiver) \
+ V(Array, JSArray) \
+ V(Map, JSMap) \
+ V(Set, JSSet) \
+ V(ArrayBuffer, JSArrayBuffer) \
+ V(ArrayBufferView, JSArrayBufferView) \
+ V(TypedArray, JSTypedArray) \
+ V(Uint8Array, JSTypedArray) \
+ V(Uint8ClampedArray, JSTypedArray) \
+ V(Int8Array, JSTypedArray) \
+ V(Uint16Array, JSTypedArray) \
+ V(Int16Array, JSTypedArray) \
+ V(Uint32Array, JSTypedArray) \
+ V(Int32Array, JSTypedArray) \
+ V(Float32Array, JSTypedArray) \
+ V(Float64Array, JSTypedArray) \
+ V(DataView, JSDataView) \
+ V(SharedArrayBuffer, JSArrayBuffer) \
+ V(Name, Name) \
+ V(String, String) \
+ V(Symbol, Symbol) \
+ V(Script, JSFunction) \
+ V(UnboundScript, SharedFunctionInfo) \
+ V(Module, Module) \
+ V(Function, JSReceiver) \
+ V(Message, JSMessageObject) \
+ V(Context, Context) \
+ V(External, Object) \
+ V(StackTrace, JSArray) \
+ V(StackFrame, JSObject) \
+ V(Proxy, JSProxy) \
+ V(NativeWeakMap, JSWeakMap) \
+ V(debug::GeneratorObject, JSGeneratorObject) \
+ V(debug::Script, Script) \
+ V(Promise, JSPromise)
class Utils {
public:
@@ -348,8 +350,7 @@ OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
namespace internal {
-
-class DeferredHandles {
+class V8_EXPORT_PRIVATE DeferredHandles {
public:
~DeferredHandles();
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
index 815f5de577..d246aadb95 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/arguments.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/arguments.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 965232438a..1d91b20b2b 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -6,7 +6,7 @@
#define V8_ARGUMENTS_H_
#include "src/allocation.h"
-#include "src/objects-inl.h"
+#include "src/objects.h"
#include "src/tracing/trace-event.h"
namespace v8 {
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 4e97f95ed3..5608256d5f 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -41,7 +41,7 @@
#include "src/assembler.h"
#include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -590,6 +590,17 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
+Address Assembler::target_address_at(Address pc, Code* code) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 1350dc41a3..ec75b7d07e 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -3878,8 +3878,10 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
- emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
- (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
+ int U = NeonU(dt);
+ int imm3 = 1 << NeonSz(dt);
+ emit(0xFU * B28 | B25 | U * B24 | B23 | d * B22 | imm3 * B19 | vd * B12 |
+ 0xA * B8 | m * B5 | B4 | vm);
}
static int EncodeScalar(NeonDataType dt, int index) {
@@ -3928,7 +3930,7 @@ void Assembler::vmov(NeonDataType dt, Register dst, DwVfpRegister src,
int vn, n;
src.split_code(&vn, &n);
int opc1_opc2 = EncodeScalar(dt, index);
- int u = (dt & NeonDataTypeUMask) != 0 ? 1 : 0;
+ int u = NeonU(dt);
emit(0xEEu * B24 | u * B23 | B20 | vn * B16 | dst.code() * B12 | 0xB * B8 |
n * B7 | B4 | opc1_opc2);
}
@@ -4209,81 +4211,199 @@ void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
emit(EncodeNeonBinaryBitwiseOp(VORR, dst, src1, src2));
}
-void Assembler::vadd(QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2) {
- DCHECK(IsEnabled(NEON));
- // Qd = vadd(Qn, Qm) SIMD floating point addition.
- // Instruction details available in ARM DDI 0406C.b, A8-830.
+enum FPBinOp {
+ VADDF,
+ VSUBF,
+ VMULF,
+ VMINF,
+ VMAXF,
+ VRECPS,
+ VRSQRTS,
+ VCEQF,
+ VCGEF,
+ VCGTF
+};
+
+static Instr EncodeNeonBinOp(FPBinOp op, QwNeonRegister dst,
+ QwNeonRegister src1, QwNeonRegister src2) {
+ int op_encoding = 0;
+ switch (op) {
+ case VADDF:
+ op_encoding = 0xD * B8;
+ break;
+ case VSUBF:
+ op_encoding = B21 | 0xD * B8;
+ break;
+ case VMULF:
+ op_encoding = B24 | 0xD * B8 | B4;
+ break;
+ case VMINF:
+ op_encoding = B21 | 0xF * B8;
+ break;
+ case VMAXF:
+ op_encoding = 0xF * B8;
+ break;
+ case VRECPS:
+ op_encoding = 0xF * B8 | B4;
+ break;
+ case VRSQRTS:
+ op_encoding = B21 | 0xF * B8 | B4;
+ break;
+ case VCEQF:
+ op_encoding = 0xE * B8;
+ break;
+ case VCGEF:
+ op_encoding = B24 | 0xE * B8;
+ break;
+ case VCGTF:
+ op_encoding = B24 | B21 | 0xE * B8;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
- emit(0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 | B6 |
- m * B5 | vm);
-}
-
-void Assembler::vadd(NeonSize size, QwNeonRegister dst,
- const QwNeonRegister src1, const QwNeonRegister src2) {
- DCHECK(IsEnabled(NEON));
- // Qd = vadd(Qn, Qm) SIMD integer addition.
- // Instruction details available in ARM DDI 0406C.b, A8-828.
+ return 0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | n * B7 | B6 | m * B5 |
+ vm | op_encoding;
+}
+
+enum IntegerBinOp {
+ VADD,
+ VQADD,
+ VSUB,
+ VQSUB,
+ VMUL,
+ VMIN,
+ VMAX,
+ VTST,
+ VCEQ,
+ VCGE,
+ VCGT
+};
+
+static Instr EncodeNeonBinOp(IntegerBinOp op, NeonDataType dt,
+ const QwNeonRegister dst,
+ const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ int op_encoding = 0;
+ switch (op) {
+ case VADD:
+ op_encoding = 0x8 * B8;
+ break;
+ case VQADD:
+ op_encoding = B4;
+ break;
+ case VSUB:
+ op_encoding = B24 | 0x8 * B8;
+ break;
+ case VQSUB:
+ op_encoding = 0x2 * B8 | B4;
+ break;
+ case VMUL:
+ op_encoding = 0x9 * B8 | B4;
+ break;
+ case VMIN:
+ op_encoding = 0x6 * B8 | B4;
+ break;
+ case VMAX:
+ op_encoding = 0x6 * B8;
+ break;
+ case VTST:
+ op_encoding = 0x8 * B8 | B4;
+ break;
+ case VCEQ:
+ op_encoding = B24 | 0x8 * B8 | B4;
+ break;
+ case VCGE:
+ op_encoding = 0x3 * B8 | B4;
+ break;
+ case VCGT:
+ op_encoding = 0x3 * B8;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
- int sz = static_cast<int>(size);
- emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
- n * B7 | B6 | m * B5 | vm);
+ int size = NeonSz(dt);
+ int u = NeonU(dt);
+ return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
+ n * B7 | B6 | m * B5 | vm | op_encoding;
}
-void Assembler::vsub(QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2) {
+static Instr EncodeNeonBinOp(IntegerBinOp op, NeonSize size,
+ const QwNeonRegister dst,
+ const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ // Map NeonSize values to the signed values in NeonDataType, so the U bit
+ // will be 0.
+ return EncodeNeonBinOp(op, static_cast<NeonDataType>(size), dst, src1, src2);
+}
+
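// Worked example of the mapping above (assuming NeonSize and the signed
// NeonDataType constants share values, as the comment states): Neon16 casts
// to NeonS16, so NeonU() gives 0 and NeonSz() still gives the lane size:
//   vadd(Neon16, q0, q1, q2) == emit(EncodeNeonBinOp(VADD, NeonS16, ...))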
+void Assembler::vadd(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vadd(Qn, Qm) SIMD floating point addition.
+ // Instruction details available in ARM DDI 0406C.b, A8-830.
+ emit(EncodeNeonBinOp(VADDF, dst, src1, src2));
+}
+
+void Assembler::vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vadd(Qn, Qm) SIMD integer addition.
+ // Instruction details available in ARM DDI 0406C.b, A8-828.
+ emit(EncodeNeonBinOp(VADD, size, dst, src1, src2));
+}
+
+void Assembler::vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vqadd(Qn, Qm) SIMD integer saturating addition.
+ // Instruction details available in ARM DDI 0406C.b, A8-996.
+ emit(EncodeNeonBinOp(VQADD, dt, dst, src1, src2));
+}
+
+void Assembler::vsub(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vsub(Qn, Qm) SIMD floating point subtraction.
// Instruction details available in ARM DDI 0406C.b, A8-1086.
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(0x1E4U * B23 | d * B22 | B21 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 |
- B6 | m * B5 | vm);
+ emit(EncodeNeonBinOp(VSUBF, dst, src1, src2));
}
-void Assembler::vsub(NeonSize size, QwNeonRegister dst,
- const QwNeonRegister src1, const QwNeonRegister src2) {
+void Assembler::vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vsub(Qn, Qm) SIMD integer subtraction.
// Instruction details available in ARM DDI 0406C.b, A8-1084.
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- int sz = static_cast<int>(size);
- emit(0x1E6U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
- n * B7 | B6 | m * B5 | vm);
+ emit(EncodeNeonBinOp(VSUB, size, dst, src1, src2));
}
-void Assembler::vmul(QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2) {
+void Assembler::vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vqsub(Qn, Qm) SIMD integer saturating subtraction.
+ // Instruction details available in ARM DDI 0406C.b, A8-1020.
+ emit(EncodeNeonBinOp(VQSUB, dt, dst, src1, src2));
+}
+
+void Assembler::vmul(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmul(Qn, Qm) SIMD floating point multiply.
// Instruction details available in ARM DDI 0406C.b, A8-958.
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 | B6 |
- m * B5 | B4 | vm);
+ emit(EncodeNeonBinOp(VMULF, dst, src1, src2));
}
void Assembler::vmul(NeonSize size, QwNeonRegister dst,
@@ -4291,43 +4411,7 @@ void Assembler::vmul(NeonSize size, QwNeonRegister dst,
DCHECK(IsEnabled(NEON));
// Qd = vmul(Qn, Qm) SIMD integer multiply.
// Instruction details available in ARM DDI 0406C.b, A8-960.
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- int sz = static_cast<int>(size);
- emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x9 * B8 |
- n * B7 | B6 | m * B5 | B4 | vm);
-}
-
-static Instr EncodeNeonMinMax(bool is_min, QwNeonRegister dst,
- QwNeonRegister src1, QwNeonRegister src2) {
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- int min = is_min ? 1 : 0;
- return 0x1E4U * B23 | d * B22 | min * B21 | vn * B16 | vd * B12 | 0xF * B8 |
- n * B7 | B6 | m * B5 | vm;
-}
-
-static Instr EncodeNeonMinMax(bool is_min, NeonDataType dt, QwNeonRegister dst,
- QwNeonRegister src1, QwNeonRegister src2) {
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- int min = is_min ? 1 : 0;
- int size = (dt & NeonDataTypeSizeMask) / 2;
- int U = dt & NeonDataTypeUMask;
- return 0x1E4U * B23 | U | d * B22 | size * B20 | vn * B16 | vd * B12 |
- 0x6 * B8 | B6 | m * B5 | min * B4 | vm;
+ emit(EncodeNeonBinOp(VMUL, size, dst, src1, src2));
}
void Assembler::vmin(const QwNeonRegister dst, const QwNeonRegister src1,
@@ -4335,7 +4419,7 @@ void Assembler::vmin(const QwNeonRegister dst, const QwNeonRegister src1,
DCHECK(IsEnabled(NEON));
// Qd = vmin(Qn, Qm) SIMD floating point MIN.
// Instruction details available in ARM DDI 0406C.b, A8-928.
- emit(EncodeNeonMinMax(true, dst, src1, src2));
+ emit(EncodeNeonBinOp(VMINF, dst, src1, src2));
}
void Assembler::vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
@@ -4343,7 +4427,7 @@ void Assembler::vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
DCHECK(IsEnabled(NEON));
// Qd = vmin(Qn, Qm) SIMD integer MIN.
// Instruction details available in ARM DDI 0406C.b, A8-926.
- emit(EncodeNeonMinMax(true, dt, dst, src1, src2));
+ emit(EncodeNeonBinOp(VMIN, dt, dst, src1, src2));
}
void Assembler::vmax(QwNeonRegister dst, QwNeonRegister src1,
@@ -4351,7 +4435,7 @@ void Assembler::vmax(QwNeonRegister dst, QwNeonRegister src1,
DCHECK(IsEnabled(NEON));
// Qd = vmax(Qn, Qm) SIMD floating point MAX.
// Instruction details available in ARM DDI 0406C.b, A8-928.
- emit(EncodeNeonMinMax(false, dst, src1, src2));
+ emit(EncodeNeonBinOp(VMAXF, dst, src1, src2));
}
void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
@@ -4359,7 +4443,49 @@ void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
DCHECK(IsEnabled(NEON));
// Qd = vmax(Qn, Qm) SIMD integer MAX.
// Instruction details available in ARM DDI 0406C.b, A8-926.
- emit(EncodeNeonMinMax(false, dt, dst, src1, src2));
+ emit(EncodeNeonBinOp(VMAX, dt, dst, src1, src2));
+}
+
+enum NeonShiftOp { VSHL, VSHR };
+
+static Instr EncodeNeonShiftOp(NeonShiftOp op, NeonDataType dt,
+ QwNeonRegister dst, QwNeonRegister src,
+ int shift) {
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ int size_in_bits = kBitsPerByte << NeonSz(dt);
+ int op_encoding = 0;
+ int imm6 = 0;
+ if (op == VSHL) {
+ DCHECK(shift >= 0 && size_in_bits > shift);
+ imm6 = size_in_bits + shift;
+ op_encoding = 0x5 * B8;
+ } else {
+ DCHECK_EQ(VSHR, op);
+ DCHECK(shift > 0 && size_in_bits >= shift);
+ imm6 = 2 * size_in_bits - shift;
+ op_encoding = NeonU(dt) * B24;
+ }
+ return 0x1E5U * B23 | d * B22 | imm6 * B16 | vd * B12 | B6 | m * B5 | B4 |
+ vm | op_encoding;
+}
+
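// Worked imm6 example, straight from the arithmetic above (32-bit lanes,
// size_in_bits == 32):
//   vshl(NeonS32, q0, q1, 5) -> imm6 = 32 + 5     = 37
//   vshr(NeonS32, q0, q1, 5) -> imm6 = 2 * 32 - 5 = 59
// vshl encodes size + shift; vshr encodes the shift's complement.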
+void Assembler::vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
+ int shift) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vshl(Qm, bits) SIMD shift left immediate.
+ // Instruction details available in ARM DDI 0406C.b, A8-1046.
+ emit(EncodeNeonShiftOp(VSHL, dt, dst, src, shift));
+}
+
+void Assembler::vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
+ int shift) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vshr(Qm, bits) SIMD shift right immediate.
+ // Instruction details available in ARM DDI 0406C.b, A8-1052.
+ emit(EncodeNeonShiftOp(VSHR, dt, dst, src, shift));
}
static Instr EncodeNeonEstimateOp(bool is_rsqrt, QwNeonRegister dst,
@@ -4373,158 +4499,90 @@ static Instr EncodeNeonEstimateOp(bool is_rsqrt, QwNeonRegister dst,
rsqrt * B7 | B6 | m * B5 | vm;
}
-void Assembler::vrecpe(const QwNeonRegister dst, const QwNeonRegister src) {
+void Assembler::vrecpe(QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vrecpe(Qm) SIMD reciprocal estimate.
// Instruction details available in ARM DDI 0406C.b, A8-1024.
emit(EncodeNeonEstimateOp(false, dst, src));
}
-void Assembler::vrsqrte(const QwNeonRegister dst, const QwNeonRegister src) {
+void Assembler::vrsqrte(QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vrsqrte(Qm) SIMD reciprocal square root estimate.
// Instruction details available in ARM DDI 0406C.b, A8-1038.
emit(EncodeNeonEstimateOp(true, dst, src));
}
-static Instr EncodeNeonRefinementOp(bool is_rsqrt, QwNeonRegister dst,
- QwNeonRegister src1, QwNeonRegister src2) {
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- int rsqrt = is_rsqrt ? 1 : 0;
- return 0x1E4U * B23 | d * B22 | rsqrt * B21 | vn * B16 | vd * B12 | 0xF * B8 |
- n * B7 | B6 | m * B5 | B4 | vm;
-}
-
-void Assembler::vrecps(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2) {
+void Assembler::vrecps(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vrecps(Qn, Qm) SIMD reciprocal refinement step.
// Instruction details available in ARM DDI 0406C.b, A8-1026.
- emit(EncodeNeonRefinementOp(false, dst, src1, src2));
+ emit(EncodeNeonBinOp(VRECPS, dst, src1, src2));
}
-void Assembler::vrsqrts(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2) {
+void Assembler::vrsqrts(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vrsqrts(Qn, Qm) SIMD reciprocal square root refinement step.
// Instruction details available in ARM DDI 0406C.b, A8-1040.
- emit(EncodeNeonRefinementOp(true, dst, src1, src2));
+ emit(EncodeNeonBinOp(VRSQRTS, dst, src1, src2));
}
-void Assembler::vtst(NeonSize size, QwNeonRegister dst,
- const QwNeonRegister src1, const QwNeonRegister src2) {
+void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vtst(Qn, Qm) SIMD test integer operands.
// Instruction details available in ARM DDI 0406C.b, A8-1098.
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- int sz = static_cast<int>(size);
- emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
- n * B7 | B6 | m * B5 | B4 | vm);
+ emit(EncodeNeonBinOp(VTST, size, dst, src1, src2));
}
-void Assembler::vceq(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2) {
+void Assembler::vceq(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vceq(Qn, Qm) SIMD floating point compare equal.
// Instruction details available in ARM DDI 0406C.b, A8-844.
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | 0xe * B8 | n * B7 | B6 |
- m * B5 | vm);
+ emit(EncodeNeonBinOp(VCEQF, dst, src1, src2));
}
-void Assembler::vceq(NeonSize size, QwNeonRegister dst,
- const QwNeonRegister src1, const QwNeonRegister src2) {
+void Assembler::vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vceq(Qn, Qm) SIMD integer compare equal.
// Instruction details available in ARM DDI 0406C.b, A8-844.
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- int sz = static_cast<int>(size);
- emit(0x1E6U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
- n * B7 | B6 | m * B5 | B4 | vm);
+ emit(EncodeNeonBinOp(VCEQ, size, dst, src1, src2));
}
-static Instr EncodeNeonCompareOp(const QwNeonRegister dst,
- const QwNeonRegister src1,
- const QwNeonRegister src2, Condition cond) {
- DCHECK(cond == ge || cond == gt);
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- int is_gt = (cond == gt) ? 1 : 0;
- return 0x1E6U * B23 | d * B22 | is_gt * B21 | vn * B16 | vd * B12 | 0xe * B8 |
- n * B7 | B6 | m * B5 | vm;
-}
-
-static Instr EncodeNeonCompareOp(NeonDataType dt, const QwNeonRegister dst,
- const QwNeonRegister src1,
- const QwNeonRegister src2, Condition cond) {
- DCHECK(cond == ge || cond == gt);
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- int size = (dt & NeonDataTypeSizeMask) / 2;
- int U = dt & NeonDataTypeUMask;
- int is_ge = (cond == ge) ? 1 : 0;
- return 0x1E4U * B23 | U | d * B22 | size * B20 | vn * B16 | vd * B12 |
- 0x3 * B8 | n * B7 | B6 | m * B5 | is_ge * B4 | vm;
-}
-
-void Assembler::vcge(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2) {
+void Assembler::vcge(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcge(Qn, Qm) SIMD floating point compare greater or equal.
// Instruction details available in ARM DDI 0406C.b, A8-848.
- emit(EncodeNeonCompareOp(dst, src1, src2, ge));
+ emit(EncodeNeonBinOp(VCGEF, dst, src1, src2));
}
-void Assembler::vcge(NeonDataType dt, QwNeonRegister dst,
- const QwNeonRegister src1, const QwNeonRegister src2) {
+void Assembler::vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcge(Qn, Qm) SIMD integer compare greater or equal.
// Instruction details available in ARM DDI 0406C.b, A8-848.
- emit(EncodeNeonCompareOp(dt, dst, src1, src2, ge));
+ emit(EncodeNeonBinOp(VCGE, dt, dst, src1, src2));
}
-void Assembler::vcgt(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2) {
+void Assembler::vcgt(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcgt(Qn, Qm) SIMD floating point compare greater than.
// Instruction details available in ARM DDI 0406C.b, A8-852.
- emit(EncodeNeonCompareOp(dst, src1, src2, gt));
+ emit(EncodeNeonBinOp(VCGTF, dst, src1, src2));
}
-void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst,
- const QwNeonRegister src1, const QwNeonRegister src2) {
+void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcgt(Qn, Qm) SIMD integer compare greater than.
// Instruction details available in ARM DDI 0406C.b, A8-852.
- emit(EncodeNeonCompareOp(dt, dst, src1, src2, gt));
+ emit(EncodeNeonBinOp(VCGT, dt, dst, src1, src2));
}
void Assembler::vext(QwNeonRegister dst, const QwNeonRegister src1,
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index e73c5a170f..763ef715b6 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -150,6 +150,7 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
const Register no_reg = {Register::kCode_no_reg};
static const bool kSimpleFPAliasing = false;
+static const bool kSimdMaskRegisters = false;
// Single word VFP register.
struct SwVfpRegister {
@@ -728,17 +729,10 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(static Address target_address_at(Address pc, Code* code)) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- return target_address_at(pc, constant_pool);
- }
+ INLINE(static Address target_address_at(Address pc, Code* code));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(isolate, pc, constant_pool, target,
- icache_flush_mode);
- }
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -1371,47 +1365,44 @@ class Assembler : public AssemblerBase {
void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
- void vadd(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vadd(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vsub(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vsub(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vmul(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vmul(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vmin(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vmin(NeonDataType dt, const QwNeonRegister dst,
- const QwNeonRegister src1, const QwNeonRegister src2);
- void vmax(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vmax(NeonDataType dt, const QwNeonRegister dst,
- const QwNeonRegister src1, const QwNeonRegister src2);
+ void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
+ void vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
+ void vsub(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
+ void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
+ void vmul(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
+ void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
+ void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
+ void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
+ void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
// vrecpe and vrsqrte only support floating point lanes.
- void vrecpe(const QwNeonRegister dst, const QwNeonRegister src);
- void vrsqrte(const QwNeonRegister dst, const QwNeonRegister src);
- void vrecps(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vrsqrts(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vtst(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vceq(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vceq(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vcge(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vcge(NeonDataType dt, const QwNeonRegister dst,
- const QwNeonRegister src1, const QwNeonRegister src2);
- void vcgt(const QwNeonRegister dst, const QwNeonRegister src1,
- const QwNeonRegister src2);
- void vcgt(NeonDataType dt, const QwNeonRegister dst,
- const QwNeonRegister src1, const QwNeonRegister src2);
+ void vrecpe(QwNeonRegister dst, QwNeonRegister src);
+ void vrsqrte(QwNeonRegister dst, QwNeonRegister src);
+ void vrecps(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vrsqrts(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
+ void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
+ void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
+ void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
void vext(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2, int bytes);
void vzip(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 307330cd8b..67d661e0e8 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -195,9 +195,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
- __ b(eq, slow);
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
@@ -208,9 +205,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
- __ b(eq, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -1029,12 +1023,12 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r2: receiver
// r3: argc
// r4: argv
- int marker = type();
+ StackFrame::Type marker = type();
if (FLAG_enable_embedded_constant_pool) {
__ mov(r8, Operand::Zero());
}
- __ mov(r7, Operand(Smi::FromInt(marker)));
- __ mov(r6, Operand(Smi::FromInt(marker)));
+ __ mov(r7, Operand(StackFrame::TypeToMarker(marker)));
+ __ mov(r6, Operand(StackFrame::TypeToMarker(marker)));
__ mov(r5,
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ ldr(r5, MemOperand(r5));
@@ -1054,11 +1048,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ cmp(r6, Operand::Zero());
__ b(ne, &non_outermost_js);
__ str(fp, MemOperand(r5));
- __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ bind(&non_outermost_js);
- __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
__ push(ip);
@@ -1124,7 +1118,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(r5);
- __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ cmp(r5, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ b(ne, &non_outermost_js_2);
__ mov(r6, Operand::Zero());
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
@@ -1153,55 +1147,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver = LoadDescriptor::ReceiverRegister();
- // Ensure that the vector and slot registers won't be clobbered before
- // calling the miss handler.
- DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
- LoadWithVectorDescriptor::SlotRegister()));
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
- r5, &miss);
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
- // Return address is in lr.
- Label miss;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register index = LoadDescriptor::NameRegister();
- Register scratch = r5;
- Register result = r0;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
- result.is(LoadWithVectorDescriptor::SlotRegister()));
-
- // StringCharAtGenerator doesn't use the result register until it's passed
- // the different miss possibilities. If it did, we would have a conflict
- // when FLAG_vector_ics is true.
- StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- RECEIVER_IS_STRING);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -1297,7 +1242,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
- // (8) Sliced string. Replace subject with parent. Go to (1).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
@@ -1319,6 +1264,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
@@ -1346,10 +1292,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ b(ls, &runtime);
__ SmiUntag(r1);
- STATIC_ASSERT(4 == kOneByteStringTag);
+ STATIC_ASSERT(8 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ and_(r0, r0, Operand(kStringEncodingMask));
- __ mov(r3, Operand(r0, ASR, 2), SetCC);
+ __ mov(r3, Operand(r0, ASR, 3), SetCC);
__ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
ne);
__ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
@@ -1583,12 +1529,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
__ b(ne, &runtime);
- // (8) Sliced string. Replace subject with parent. Go to (4).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (4).
+ Label thin_string;
+ __ cmp(r1, Operand(kThinStringTag));
+ __ b(eq, &thin_string);
// Load offset into r9 and replace subject string with parent.
__ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ SmiUntag(r9);
__ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ jmp(&check_underlying); // Go to (4).
+
+ __ bind(&thin_string);
+ __ ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+ __ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
@@ -1750,192 +1703,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
- Register slot) {
- __ add(feedback_vector, feedback_vector,
- Operand::PointerOffsetFromSmiKey(slot));
- __ add(feedback_vector, feedback_vector,
- Operand(FixedArray::kHeaderSize + kPointerSize));
- __ ldr(slot, FieldMemOperand(feedback_vector, 0));
- __ add(slot, slot, Operand(Smi::FromInt(1)));
- __ str(slot, FieldMemOperand(feedback_vector, 0));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
- // r0 - number of arguments
- // r1 - function
- // r3 - slot id
- // r2 - vector
- // r4 - allocation site (loaded from vector[slot])
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
- __ cmp(r1, r5);
- __ b(ne, miss);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, r2, r3);
-
- __ mov(r2, r4);
- __ mov(r3, r1);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
- // r0 - number of arguments
- // r1 - function
- // r3 - slot id (Smi)
- // r2 - vector
- Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
- // The checks. First, does r1 match the recorded monomorphic target?
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
-
- // We don't know that we have a weak cell. We might have a private symbol
- // or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
- // computed, meaning that it can't appear to be a pointer. If the low bit is
- // 0, then hash is computed, but the 0 bit prevents the field from appearing
- // to be a pointer.
- STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
- WeakCell::kValueOffset &&
- WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
- __ ldr(r5, FieldMemOperand(r4, WeakCell::kValueOffset));
- __ cmp(r1, r5);
- __ b(ne, &extra_checks_or_miss);
-
- // The compare above could have been a SMI/SMI comparison. Guard against this
- // convincing us that we have a monomorphic JSFunction.
- __ JumpIfSmi(r1, &extra_checks_or_miss);
-
- __ bind(&call_function);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, r2, r3);
-
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
- tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&extra_checks_or_miss);
- Label uninitialized, miss, not_allocation_site;
-
- __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
- __ b(eq, &call);
-
- // Verify that r4 contains an AllocationSite
- __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &not_allocation_site);
-
- // We have an allocation site.
- HandleArrayCase(masm, &miss);
-
- __ bind(&not_allocation_site);
-
- // The following cases attempt to handle MISS cases without going to the
- // runtime.
- if (FLAG_trace_ic) {
- __ jmp(&miss);
- }
-
- __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
- __ b(eq, &uninitialized);
-
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(r4);
- __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
- __ b(ne, &miss);
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
-
- __ bind(&call);
-
- // Increment the call count for megamorphic function calls.
- IncrementCallCount(masm, r2, r3);
-
- __ bind(&call_count_incremented);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&uninitialized);
-
- // We are going monomorphic, provided we actually have a JSFunction.
- __ JumpIfSmi(r1, &miss);
-
- // Goto miss case if we do not have a function.
- __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
- __ b(ne, &miss);
-
- // Make sure the function is not the Array() function, which requires special
- // behavior on MISS.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r4);
- __ cmp(r1, r4);
- __ b(eq, &miss);
-
- // Make sure the function belongs to the same native context.
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kContextOffset));
- __ ldr(r4, ContextMemOperand(r4, Context::NATIVE_CONTEXT_INDEX));
- __ ldr(ip, NativeContextMemOperand());
- __ cmp(r4, ip);
- __ b(ne, &miss);
-
- // Store the function. Use a stub since we need a frame for allocation.
- // r2 - vector
- // r3 - slot
- // r1 - function
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateWeakCellStub create_stub(masm->isolate());
- __ SmiTag(r0);
- __ Push(r0, r2, r3, cp, r1);
- __ CallStub(&create_stub);
- __ Pop(r2, r3, cp, r1);
- __ Pop(r0);
- __ SmiUntag(r0);
- }
-
- __ jmp(&call_function);
-
- // We are here because tracing is on or we encountered a MISS case we can't
- // handle here.
- __ bind(&miss);
- GenerateMiss(masm);
-
- __ jmp(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the number of arguments as Smi.
- __ SmiTag(r0);
-
- // Push the receiver and the function and feedback info.
- __ Push(r0, r1, r2, r3);
-
- // Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss);
-
- // Move result to edi and exit the internal frame.
- __ mov(r1, r0);
-
- // Restore number of arguments.
- __ Pop(r0);
- __ SmiUntag(r0);
-}
-
-
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
@@ -2027,45 +1794,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
- __ tst(code_, Operand(kSmiTagMask |
- ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
- __ b(ne, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged one-byte char code.
- __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
- __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -2924,15 +2652,10 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadFeedbackVector(r2);
- CallICStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
+ masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm);
predictable.ExpectSize(masm->CallStubSize(&stub) +
2 * Assembler::kInstrSize);
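The added MaybeCheckConstPool() flushes any pending constant pool before PredictableCodeSizeScope opens; if the pool were dumped inside the scope, the extra words would trip the ExpectSize() check. A toy model of why flushing first keeps the measured region exact (not V8 code, just the invariant):

#include <cassert>
#include <vector>

// Toy assembler: pending pool words are dumped into the instruction
// stream when flushed, so a flush inside a size-checked region would
// change that region's length.
struct Asm {
  std::vector<int> code, pending_pool;
  void Emit(int insn) { code.push_back(insn); }
  void MaybeFlushPool() {
    for (int w : pending_pool) code.push_back(w);
    pending_pool.clear();
  }
};

int main() {
  Asm a;
  a.pending_pool = {42, 43};           // constants waiting to be emitted
  a.MaybeFlushPool();                  // flush outside the measured region
  size_t start = a.code.size();
  a.Emit(1); a.Emit(2);                // the "predictable" call sequence
  assert(a.code.size() - start == 2);  // exact size, no pool words inside
  return 0;
}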
@@ -3288,495 +3011,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r1);
-
- // Make r2 point to the JavaScript frame.
- __ mov(r2, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
- __ cmp(ip, r1);
- __ b(eq, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have rest parameters (only possible if we have an
- // arguments adaptor frame below the function frame).
- Label no_rest_parameters;
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- __ ldr(ip, MemOperand(r2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &no_rest_parameters);
-
- // Check if the arguments adaptor frame contains more arguments than
- // specified by the function's internal formal parameter count.
- Label rest_parameters;
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ sub(r0, r0, r3, SetCC);
- __ b(gt, &rest_parameters);
-
- // Return an empty rest parameter array.
- __ bind(&no_rest_parameters);
- {
- // ----------- S t a t e -------------
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Allocate an empty rest parameter array.
- Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the rest parameter array in r0.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
- __ str(r1, FieldMemOperand(r0, JSArray::kMapOffset));
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ str(r1, FieldMemOperand(r0, JSArray::kPropertiesOffset));
- __ str(r1, FieldMemOperand(r0, JSArray::kElementsOffset));
- __ mov(r1, Operand(0));
- __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(Smi::FromInt(JSArray::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- }
- __ jmp(&done_allocate);
- }
-
- __ bind(&rest_parameters);
- {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
- __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ add(r2, r2,
- Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- r0 : number of rest parameters (tagged)
- // -- r1 : function
- // -- r2 : pointer to first rest parameters
- // -- lr : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ mov(r6, Operand(JSArray::kSize + FixedArray::kHeaderSize));
- __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the elements array in r3.
- __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
- __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
- __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ add(r4, r3, Operand(FixedArray::kHeaderSize));
- {
- Label loop, done_loop;
- __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ bind(&loop);
- __ cmp(r4, r1);
- __ b(eq, &done_loop);
- __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
- __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
- __ add(r4, r4, Operand(1 * kPointerSize));
- __ b(&loop);
- __ bind(&done_loop);
- }
-
- // Setup the rest parameter array in r4.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
- __ str(r1, FieldMemOperand(r4, JSArray::kMapOffset));
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ str(r1, FieldMemOperand(r4, JSArray::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r4, JSArray::kElementsOffset));
- __ str(r0, FieldMemOperand(r4, JSArray::kLengthOffset));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ mov(r0, r4);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ cmp(r6, Operand(kMaxRegularHeapObjectSize));
- __ b(gt, &too_big_for_new_space);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r6);
- __ Push(r0, r2, r6);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ mov(r3, r0);
- __ Pop(r0, r2);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewRestParameter.
- __ bind(&too_big_for_new_space);
- __ push(r1);
- __ TailCallRuntime(Runtime::kNewRestParameter);
- }
-}
-
-
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r1);
-
- // Make r9 point to the JavaScript frame.
- __ mov(r9, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ ldr(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ ldr(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
- __ cmp(ip, r1);
- __ b(eq, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2,
- FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ add(r3, r9, Operand(r2, LSL, kPointerSizeLog2 - 1));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // r1 : function
- // r2 : number of parameters (tagged)
- // r3 : parameters pointer
- // r9 : JavaScript frame pointer
- // Registers used over whole function:
- // r5 : arguments count (tagged)
- // r6 : mapped parameter count (tagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ldr(r4, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ mov(r5, r2);
- __ mov(r6, r2);
- __ b(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ add(r4, r4, Operand(r5, LSL, 1));
- __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // r5 = argument count (tagged)
- // r6 = parameter count (tagged)
- // Compute the mapped parameter count = min(r6, r5) in r6.
- __ mov(r6, r2);
- __ cmp(r6, Operand(r5));
- __ mov(r6, Operand(r5), LeaveCC, gt);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- __ cmp(r6, Operand(Smi::kZero));
- __ mov(r9, Operand::Zero(), LeaveCC, eq);
- __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
- __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
-
- // 2. Backing store.
- __ add(r9, r9, Operand(r5, LSL, 1));
- __ add(r9, r9, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ add(r9, r9, Operand(JSSloppyArgumentsObject::kSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(r9, r0, r9, r4, &runtime, NO_ALLOCATION_FLAGS);
-
- // r0 = address of new object(s) (tagged)
- // r2 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into r4.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ ldr(r4, NativeContextMemOperand());
- __ cmp(r6, Operand::Zero());
- __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
- __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
-
- // r0 = address of new object (tagged)
- // r2 = argument count (smi-tagged)
- // r4 = address of arguments map (tagged)
- // r6 = mapped parameter count (tagged)
- __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
- __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
- __ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- __ AssertNotSmi(r1);
- __ str(r1, FieldMemOperand(r0, JSSloppyArgumentsObject::kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(r5);
- __ str(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, r4 will point there, otherwise
- // it will point to the backing store.
- __ add(r4, r0, Operand(JSSloppyArgumentsObject::kSize));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
-
- // r0 = address of new object (tagged)
- // r2 = argument count (tagged)
- // r4 = address of parameter map or backing store (tagged)
- // r6 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ cmp(r6, Operand(Smi::kZero));
- // Move backing store address to r1, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(r1, r4, LeaveCC, eq);
- __ b(eq, &skip_parameter_map);
-
- __ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ add(r5, r6, Operand(Smi::FromInt(2)));
- __ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ add(r5, r4, Operand(r6, LSL, 1));
- __ add(r5, r5, Operand(kParameterMapHeaderSize));
- __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(r5, r6);
- __ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ sub(r9, r9, Operand(r6));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ add(r1, r4, Operand(r5, LSL, 1));
- __ add(r1, r1, Operand(kParameterMapHeaderSize));
-
- // r1 = address of backing store (tagged)
- // r4 = address of parameter map (tagged), which is also the address of new
- // object + Heap::kSloppyArgumentsObjectSize (tagged)
- // r0 = temporary scratch (a.o., for address calculation)
- // r5 = loop variable (tagged)
- // ip = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ sub(r5, r5, Operand(Smi::FromInt(1)));
- __ mov(r0, Operand(r5, LSL, 1));
- __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ str(r9, MemOperand(r4, r0));
- __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ str(ip, MemOperand(r1, r0));
- __ add(r9, r9, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ cmp(r5, Operand(Smi::kZero));
- __ b(ne, &parameters_loop);
-
- // Restore r0 = new object (tagged) and r5 = argument count (tagged).
- __ sub(r0, r4, Operand(JSSloppyArgumentsObject::kSize));
- __ ldr(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // r0 = address of new object (tagged)
- // r1 = address of backing store (tagged)
- // r5 = argument count (tagged)
- // r6 = mapped parameter count (tagged)
- // r9 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
- __ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
- __ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ sub(r3, r3, Operand(r6, LSL, 1));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ sub(r3, r3, Operand(kPointerSize));
- __ ldr(r4, MemOperand(r3, 0));
- __ add(r9, r1, Operand(r6, LSL, 1));
- __ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
- __ add(r6, r6, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ cmp(r6, Operand(r5));
- __ b(lt, &arguments_loop);
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // r0 = address of new object (tagged)
- // r5 = argument count (tagged)
- __ bind(&runtime);
- __ Push(r1, r3, r5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r1);
-
- // Make r2 point to the JavaScript frame.
- __ mov(r2, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
- __ cmp(ip, r1);
- __ b(eq, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &arguments_adaptor);
- {
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(
- r4, SharedFunctionInfo::kFormalParameterCountOffset));
- __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ add(r2, r2,
- Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
- }
- __ b(&arguments_done);
- __ bind(&arguments_adaptor);
- {
- __ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ add(r2, r3, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ add(r2, r2,
- Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
- }
- __ bind(&arguments_done);
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- r0 : number of rest parameters (tagged)
- // -- r1 : function
- // -- r2 : pointer to first rest parameters
- // -- lr : return address
- // -----------------------------------
-
- // Allocate space for the strict arguments object plus the backing store.
- Label allocate, done_allocate;
- __ mov(r6, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the elements array in r3.
- __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
- __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
- __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ add(r4, r3, Operand(FixedArray::kHeaderSize));
- {
- Label loop, done_loop;
- __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ bind(&loop);
- __ cmp(r4, r1);
- __ b(eq, &done_loop);
- __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
- __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
- __ add(r4, r4, Operand(1 * kPointerSize));
- __ b(&loop);
- __ bind(&done_loop);
- }
-
- // Setup the strict arguments object in r4.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r1);
- __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kMapOffset));
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r4, JSStrictArgumentsObject::kElementsOffset));
- __ str(r0, FieldMemOperand(r4, JSStrictArgumentsObject::kLengthOffset));
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
- __ mov(r0, r4);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ cmp(r6, Operand(kMaxRegularHeapObjectSize));
- __ b(gt, &too_big_for_new_space);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r6);
- __ Push(r0, r2, r6);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ mov(r3, r0);
- __ Pop(r0, r2);
- }
- __ b(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ bind(&too_big_for_new_space);
- __ push(r1);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 06e92168b6..934875220c 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -322,6 +322,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register index,
Register result,
Label* call_runtime) {
+ Label indirect_string_loaded;
+ __ bind(&indirect_string_loaded);
+
// Fetch the instance type of the receiver into result register.
__ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -332,17 +335,24 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(eq, &check_sequential);
// Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ tst(result, Operand(kSlicedNotConsMask));
+ Label cons_string, thin_string;
+ __ and_(result, result, Operand(kStringRepresentationMask));
+ __ cmp(result, Operand(kConsStringTag));
__ b(eq, &cons_string);
+ __ cmp(result, Operand(kThinStringTag));
+ __ b(eq, &thin_string);
// Handle slices.
- Label indirect_string_loaded;
__ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ add(index, index, Operand::SmiUntag(result));
__ jmp(&indirect_string_loaded);
+ // Handle thin strings.
+ __ bind(&thin_string);
+ __ ldr(string, FieldMemOperand(string, ThinString::kActualOffset));
+ __ jmp(&indirect_string_loaded);
+
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
@@ -354,10 +364,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(ne, call_runtime);
// Get the first of the two strings and load its instance type.
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
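Binding indirect_string_loaded at the top turns the unwrapping into a loop: after each slice/thin/cons step the instance type is refetched, so nested indirections (say, a cons whose first part is a thin string) are handled by iteration instead of the old single reload at a join point. A loop-shaped sketch under assumed shapes:

#include <cassert>

enum Kind { kSeq, kCons, kSliced, kThin };       // illustrative tags only
struct Str { Kind kind; Str* inner; int offset; };

// Mirrors the new control flow: jump back to `indirect_string_loaded`
// until the string is direct, re-dispatching on the reloaded type.
Str* Flatten(Str* s, int* index) {
  for (;;) {
    switch (s->kind) {
      case kSliced: *index += s->offset; s = s->inner; break;
      case kThin:   s = s->inner; break;         // the actual string
      case kCons:   s = s->inner; break;         // first part (rhs empty)
      default:      return s;                    // sequential or external
    }
  }
}

int main() {
  Str seq{kSeq, nullptr, 0};
  Str thin{kThin, &seq, 0};
  Str cons{kCons, &thin, 0};
  int index = 3;
  assert(Flatten(&cons, &index) == &seq && index == 3);
  return 0;
}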
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index e0c91fd4bf..0b86f3e149 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -327,16 +327,18 @@ enum LFlag {
// NEON data type
enum NeonDataType {
- NeonS8 = 0x1, // U = 0, imm3 = 0b001
- NeonS16 = 0x2, // U = 0, imm3 = 0b010
- NeonS32 = 0x4, // U = 0, imm3 = 0b100
- NeonU8 = 1 << 24 | 0x1, // U = 1, imm3 = 0b001
- NeonU16 = 1 << 24 | 0x2, // U = 1, imm3 = 0b010
- NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
- NeonDataTypeSizeMask = 0x7,
- NeonDataTypeUMask = 1 << 24
+ NeonS8 = 0,
+ NeonS16 = 1,
+ NeonS32 = 2,
+ // Gap to make it easier to extract U and size.
+ NeonU8 = 4,
+ NeonU16 = 5,
+ NeonU32 = 6
};
+inline int NeonU(NeonDataType dt) { return static_cast<int>(dt) >> 2; }
+inline int NeonSz(NeonDataType dt) { return static_cast<int>(dt) & 0x3; }
+
enum NeonListType {
nlt_1 = 0x7,
nlt_2 = 0xA,
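The enum is repacked so the U flag and the element size occupy disjoint bits: signed types are 0..2, unsigned are 4..6, leaving bit 2 as U and bits 1:0 as the log2 of the lane size in bytes. A quick self-check of the two new accessors, copied from the hunk above:

#include <cassert>

enum NeonDataType { NeonS8 = 0, NeonS16 = 1, NeonS32 = 2,
                    NeonU8 = 4, NeonU16 = 5, NeonU32 = 6 };
inline int NeonU(NeonDataType dt) { return static_cast<int>(dt) >> 2; }
inline int NeonSz(NeonDataType dt) { return static_cast<int>(dt) & 0x3; }

int main() {
  assert(NeonU(NeonS16) == 0 && NeonSz(NeonS16) == 1);  // signed 16-bit
  assert(NeonU(NeonU32) == 1 && NeonSz(NeonU32) == 2);  // unsigned 32-bit
  assert((8 << NeonSz(NeonU8)) == 8);                   // lane bits = 8 << sz
  return 0;
}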
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 1231355dc0..e0e602eae1 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -95,7 +95,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
+ Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index db32fc98ce..041df55858 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1856,104 +1856,150 @@ static const char* const barrier_option_names[] = {
void Decoder::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
- case 4:
- if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 2 &&
- instr->Bit(6) == 1 && instr->Bit(4) == 1) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- if (Vm == Vn) {
- // vmov Qd, Qm
+ case 4: {
+ int Vd, Vm, Vn;
+ if (instr->Bit(6) == 0) {
+ Vd = instr->VFPDRegValue(kDoublePrecision);
+ Vm = instr->VFPMRegValue(kDoublePrecision);
+ Vn = instr->VFPNRegValue(kDoublePrecision);
+ } else {
+ Vd = instr->VFPDRegValue(kSimd128Precision);
+ Vm = instr->VFPMRegValue(kSimd128Precision);
+ Vn = instr->VFPNRegValue(kSimd128Precision);
+ }
+ switch (instr->Bits(11, 8)) {
+ case 0x0: {
+ if (instr->Bit(4) == 1) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ // vqadd.s<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vqadd.s%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case 0x1: {
+ if (instr->Bits(21, 20) == 2 && instr->Bit(6) == 1 &&
+ instr->Bit(4) == 1) {
+ if (Vm == Vn) {
+ // vmov Qd, Qm
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmov q%d, q%d", Vd, Vm);
+ } else {
+ // vorr Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vorr q%d, q%d, q%d", Vd, Vn, Vm);
+ }
+ } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
+ instr->Bit(4) == 1) {
+ // vand Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vand q%d, q%d, q%d", Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case 0x2: {
+ if (instr->Bit(4) == 1) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ // vqsub.s<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vqsub.s%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case 0x3: {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
+ // vcge/vcgt.s<size> Qd, Qm, Qn.
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vmov q%d, q%d", Vd, Vm);
- } else {
- // vorr Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vorr q%d, q%d, q%d", Vd, Vn, Vm);
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d",
+ op, size, Vd, Vn, Vm);
+ break;
}
- } else if (instr->Bits(11, 8) == 8) {
- const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- // vadd/vtst.i<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d", op,
- size, Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 0xd && instr->Bit(4) == 0) {
- const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- // vadd/vsub.f32 Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 0x9 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- // vmul.i<size> Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmul.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 0xe && instr->Bits(21, 20) == 0 &&
- instr->Bit(4) == 0) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- // vceq.f32 Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vceq.f32 q%d, q%d, q%d", Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
- instr->Bit(6) == 1 && instr->Bit(4) == 1) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- // vand Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vand q%d, q%d, q%d", Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 0x3) {
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
- // vcge/vcgt.s<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d", op,
- size, Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 0xf && instr->Bit(20) == 0 &&
- instr->Bit(6) == 1) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- if (instr->Bit(4) == 1) {
- // vrecps/vrsqrts.f32 Qd, Qm, Qn.
- const char* op = instr->Bit(21) == 0 ? "vrecps" : "vrsqrts";
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- } else {
- // vmin/max.f32 Qd, Qm, Qn.
- const char* op = instr->Bit(21) == 1 ? "vmin" : "vmax";
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+ case 0x6: {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ // vmin/vmax.s<size> Qd, Qm, Qn.
+ const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d",
+ op, size, Vd, Vn, Vm);
+ break;
}
- } else if (instr->Bits(11, 8) == 0x6) {
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- // vmin/vmax.s<size> Qd, Qm, Qn.
- const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d", op,
- size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
+ case 0x8: {
+ const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ // vadd/vtst.i<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d",
+ op, size, Vd, Vn, Vm);
+ break;
+ }
+ case 0x9: {
+ if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ // vmul.i<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmul.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case 0xd: {
+ if (instr->Bit(4) == 0) {
+ const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
+ // vadd/vsub.f32 Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case 0xe: {
+ if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
+ // vceq.f32 Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vceq.f32 q%d, q%d, q%d", Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case 0xf: {
+ if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
+ if (instr->Bit(4) == 1) {
+ // vrecps/vrsqrts.f32 Qd, Qm, Qn.
+ const char* op = instr->Bit(21) == 0 ? "vrecps" : "vrsqrts";
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+ } else {
+ // vmin/max.f32 Qd, Qm, Qn.
+ const char* op = instr->Bit(21) == 1 ? "vmin" : "vmax";
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ default:
+ Unknown(instr);
+ break;
}
break;
+ }
case 5:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
@@ -1963,7 +2009,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
+ "vmovl.s%d q%d, d%d", imm3 * 8, Vd, Vm);
} else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
// vext.8 Qd, Qm, Qn, imm4
int imm4 = instr->Bits(11, 8);
@@ -1973,91 +2019,142 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vext.8 q%d, q%d, q%d, #%d",
Vd, Vn, Vm, imm4);
- } else {
- Unknown(instr);
- }
- break;
- case 6:
- if (instr->Bits(11, 8) == 8) {
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- if (instr->Bit(4) == 0) {
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vsub.i%d q%d, q%d, q%d",
- size, Vd, Vn, Vm);
- } else {
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vceq.i%d q%d, q%d, q%d",
- size, Vd, Vn, Vm);
- }
- } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 1 &&
- instr->Bit(4) == 1) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vbsl q%d, q%d, q%d", Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
- instr->Bit(4) == 1) {
- if (instr->Bit(6) == 0) {
- // veor Dd, Dn, Dm
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "veor d%d, d%d, d%d", Vd, Vn, Vm);
-
- } else {
- // veor Qd, Qn, Qm
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "veor q%d, q%d, q%d", Vd, Vn, Vm);
- }
- } else if (instr->Bits(11, 8) == 0xd && instr->Bit(21) == 0 &&
- instr->Bit(6) == 1 && instr->Bit(4) == 1) {
- // vmul.f32 Qd, Qn, Qm
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 0xe && instr->Bit(20) == 0 &&
- instr->Bit(4) == 0) {
+ } else if (instr->Bits(11, 7) == 0xA && instr->Bit(4) == 1) {
+ // vshl.i<size> Qd, Qm, shift
+ int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+ int shift = instr->Bits(21, 16) - size;
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
- // vcge/vcgt.f32 Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 0x3) {
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
- // vcge/vcgt.u<size> Qd, Qm, Qn.
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d", op,
- size, Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 0x6) {
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vshl.i%d q%d, q%d, #%d",
+ size, Vd, Vm, shift);
+ } else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
+ // vshr.s<size> Qd, Qm, shift
+ int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+ int shift = 2 * size - instr->Bits(21, 16);
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- // vmin/vmax.u<size> Qd, Qm, Qn.
- const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d", op,
- size, Vd, Vn, Vm);
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.s%d q%d, q%d, #%d",
+ size, Vd, Vm, shift);
} else {
Unknown(instr);
}
break;
+ case 6: {
+ int Vd, Vm, Vn;
+ if (instr->Bit(6) == 0) {
+ Vd = instr->VFPDRegValue(kDoublePrecision);
+ Vm = instr->VFPMRegValue(kDoublePrecision);
+ Vn = instr->VFPNRegValue(kDoublePrecision);
+ } else {
+ Vd = instr->VFPDRegValue(kSimd128Precision);
+ Vm = instr->VFPMRegValue(kSimd128Precision);
+ Vn = instr->VFPNRegValue(kSimd128Precision);
+ }
+ switch (instr->Bits(11, 8)) {
+ case 0x0: {
+ if (instr->Bit(4) == 1) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ // vqadd.u<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vqadd.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case 0x1: {
+ if (instr->Bits(21, 20) == 1 && instr->Bit(4) == 1) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vbsl q%d, q%d, q%d", Vd, Vn, Vm);
+ } else if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 1) {
+ if (instr->Bit(6) == 0) {
+ // veor Dd, Dn, Dm
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "veor d%d, d%d, d%d", Vd, Vn, Vm);
+
+ } else {
+ // veor Qd, Qn, Qm
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "veor q%d, q%d, q%d", Vd, Vn, Vm);
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case 0x2: {
+ if (instr->Bit(4) == 1) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ // vqsub.u<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vqsub.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case 0x3: {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
+ // vcge/vcgt.u<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d",
+ op, size, Vd, Vn, Vm);
+ break;
+ }
+ case 0x6: {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ // vmin/vmax.u<size> Qd, Qm, Qn.
+ const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d",
+ op, size, Vd, Vn, Vm);
+ break;
+ }
+ case 0x8: {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ if (instr->Bit(4) == 0) {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vsub.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+ } else {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vceq.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+ }
+ break;
+ }
+ case 0xd: {
+ if (instr->Bit(21) == 0 && instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+ // vmul.f32 Qd, Qn, Qm
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case 0xe: {
+ if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
+ const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
+ // vcge/vcgt.f32 Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ default:
+ Unknown(instr);
+ break;
+ }
+ break;
+ }
case 7:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
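Both new immediate-shift cases recover the element size and shift amount from the single imm6 field in bits 21:16: the size is the largest power of two at or below imm6, vshl encodes imm6 = size + shift, and vshr (signed or unsigned) encodes imm6 = 2*size - shift. A worked check (RoundDownToPowerOfTwo32 reimplemented here; the real helper lives in base/bits.h):

#include <cassert>

// Stand-in for base::bits::RoundDownToPowerOfTwo32.
int RoundDownToPowerOfTwo32(int x) {
  int p = 1;
  while (p * 2 <= x) p *= 2;
  return p;
}

int main() {
  // vshl.i<size>: imm6 = size + shift, e.g. imm6 = 36 -> vshl.i32 ..., #4.
  int imm6 = 36;
  int size = RoundDownToPowerOfTwo32(imm6);
  assert(size == 32 && imm6 - size == 4);

  // vshr.s/u<size>: imm6 = 2*size - shift, e.g. imm6 = 60 -> vshr.s32 ..., #4.
  imm6 = 60;
  size = RoundDownToPowerOfTwo32(imm6);
  assert(size == 32 && 2 * size - imm6 == 4);
  return 0;
}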
@@ -2067,7 +2164,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
+ "vmovl.u%d q%d, d%d", imm3 * 8, Vd, Vm);
} else if (instr->Opc1Value() == 7 && instr->Bits(21, 20) == 0x3 &&
instr->Bit(4) == 0) {
if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
@@ -2162,15 +2259,24 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
Unknown(instr);
}
} else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) {
+ // vrecpe/vrsqrte.f32 Qd, Qm.
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
- // vrecpe/vrsqrte.f32 Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d", op, Vd, Vm);
} else {
Unknown(instr);
}
+ } else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
+ // vshr.u<size> Qd, Qm, shift
+ int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+ int shift = 2 * size - instr->Bits(21, 16);
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.u%d q%d, q%d, #%d",
+ size, Vd, Vm, shift);
} else {
Unknown(instr);
}
@@ -2184,8 +2290,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int size = instr->Bits(7, 6);
int align = instr->Bits(5, 4);
int Rm = instr->VmValue();
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vst1.%d ", (1 << size) << 3);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vst1.%d ",
+ (1 << size) << 3);
FormatNeonList(Vd, type);
Print(", ");
FormatNeonMemory(Rn, align, Rm);
@@ -2197,8 +2303,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int size = instr->Bits(7, 6);
int align = instr->Bits(5, 4);
int Rm = instr->VmValue();
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vld1.%d ", (1 << size) << 3);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d ",
+ (1 << size) << 3);
FormatNeonList(Vd, type);
Print(", ");
FormatNeonMemory(Rn, align, Rm);
@@ -2212,8 +2318,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int Rn = instr->Bits(19, 16);
int offset = instr->Bits(11, 0);
if (offset == 0) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "pld [r%d]", Rn);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "pld [r%d]", Rn);
} else if (instr->Bit(23) == 0) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d, #-%d]", Rn, offset);
@@ -2225,16 +2331,16 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int option = instr->Bits(3, 0);
switch (instr->Bits(7, 4)) {
case 4:
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "dsb %s", barrier_option_names[option]);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dsb %s",
+ barrier_option_names[option]);
break;
case 5:
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "dmb %s", barrier_option_names[option]);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dmb %s",
+ barrier_option_names[option]);
break;
case 6:
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "isb %s", barrier_option_names[option]);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "isb %s",
+ barrier_option_names[option]);
break;
default:
Unknown(instr);
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 506c891038..8281f2a43b 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -70,27 +70,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
@@ -142,15 +121,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r3};
+ Register registers[] = {r1, r0, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -179,6 +156,13 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r2 : start index (to support rest parameters)
+ // r1 : the target to call
+ Register registers[] = {r1, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -213,13 +197,12 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Descriptor::InitializePlatformSpecific( \
- CallInterfaceDescriptorData* data) { \
- data->InitializePlatformSpecific(0, nullptr, nullptr); \
- }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+ Register registers[] = {r1, r3, r0, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -430,6 +413,14 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r1, // loaded new FP
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index b5bba88369..9d036607f7 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -88,11 +88,11 @@ int MacroAssembler::CallStubSize(
return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}
-
-void MacroAssembler::Call(Address target,
- RelocInfo::Mode rmode,
- Condition cond,
- TargetAddressStorageMode mode) {
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+ TargetAddressStorageMode mode,
+ bool check_constant_pool) {
+ // Check if we have to emit the constant pool before we block it.
+ if (check_constant_pool) MaybeCheckConstPool();
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
@@ -138,12 +138,10 @@ int MacroAssembler::CallSize(Handle<Code> code,
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
-
-void MacroAssembler::Call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id,
- Condition cond,
- TargetAddressStorageMode mode) {
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id, Condition cond,
+ TargetAddressStorageMode mode,
+ bool check_constant_pool) {
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -1146,12 +1144,11 @@ void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
NeonDataType dt, int lane) {
- int bytes_per_lane = dt & NeonDataTypeSizeMask; // 1, 2, 4
- int log2_bytes_per_lane = bytes_per_lane / 2; // 0, 1, 2
- int byte = lane << log2_bytes_per_lane;
+ int size = NeonSz(dt); // 0, 1, 2
+ int byte = lane << size;
int double_word = byte >> kDoubleSizeLog2;
int double_byte = byte & (kDoubleSize - 1);
- int double_lane = double_byte >> log2_bytes_per_lane;
+ int double_lane = double_byte >> size;
DwVfpRegister double_source =
DwVfpRegister::from_code(src.code() * 2 + double_word);
vmov(dt, dst, double_source, double_lane);
@@ -1166,12 +1163,11 @@ void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
Register src_lane, NeonDataType dt, int lane) {
Move(dst, src);
- int bytes_per_lane = dt & NeonDataTypeSizeMask; // 1, 2, 4
- int log2_bytes_per_lane = bytes_per_lane / 2; // 0, 1, 2
- int byte = lane << log2_bytes_per_lane;
+ int size = NeonSz(dt); // 0, 1, 2
+ int byte = lane << size;
int double_word = byte >> kDoubleSizeLog2;
int double_byte = byte & (kDoubleSize - 1);
- int double_lane = double_byte >> log2_bytes_per_lane;
+ int double_lane = double_byte >> size;
DwVfpRegister double_dst =
DwVfpRegister::from_code(dst.code() * 2 + double_word);
vmov(dt, double_dst, double_lane, src_lane);
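ExtractLane and ReplaceLane now derive everything from NeonSz: a Q register is treated as two D registers, the lane's byte offset selects the D half, and the remainder gives the lane within it. A standalone check of that arithmetic, assuming the usual ARM constants kDoubleSize = 8 and kDoubleSizeLog2 = 3:

#include <cassert>
#include <utility>

// Mirrors the lane math above: `size` is NeonSz(dt), log2(bytes per lane).
std::pair<int, int> MapLane(int size, int lane) {
  const int kDoubleSizeLog2 = 3, kDoubleSize = 8;        // assumed constants
  int byte = lane << size;                               // byte offset in Q
  int double_word = byte >> kDoubleSizeLog2;             // which D half: 0/1
  int double_lane = (byte & (kDoubleSize - 1)) >> size;  // lane in that D
  return std::make_pair(double_word, double_lane);
}

int main() {
  assert(MapLane(1, 5) == std::make_pair(1, 1));  // s16 lane 5 -> high D, lane 1
  assert(MapLane(2, 3) == std::make_pair(1, 1));  // s32 lane 3 -> high D, lane 1
  assert(MapLane(0, 7) == std::make_pair(0, 7));  // s8  lane 7 -> low D, lane 7
  return 0;
}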
@@ -1399,7 +1395,7 @@ void MacroAssembler::LoadConstantPoolPointerRegister() {
}
void MacroAssembler::StubPrologue(StackFrame::Type type) {
- mov(ip, Operand(Smi::FromInt(type)));
+ mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
@@ -1431,15 +1427,15 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
- ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+ ldr(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+ ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
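The feedback vector no longer hangs off a LiteralsArray: the function now holds a Cell whose value is the vector, hence the two dependent loads above. A sketch of the assumed object chain (field names here are stand-ins for the real offsets):

#include <cassert>

// Illustrative shapes only; the real layouts live in V8's objects.h.
struct FeedbackVector { int length; };
struct Cell { FeedbackVector* value; };              // Cell::kValueOffset
struct JSFunction { Cell* feedback_vector_cell; };   // kFeedbackVectorOffset

FeedbackVector* LoadFeedbackVector(JSFunction* fn) {
  return fn->feedback_vector_cell->value;            // two hops, as in the asm
}

int main() {
  FeedbackVector v{8};
  Cell c{&v};
  JSFunction f{&c};
  assert(LoadFeedbackVector(&f) == &v);
  return 0;
}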
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
- mov(ip, Operand(Smi::FromInt(type)));
+ mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
LoadConstantPoolPointerRegister();
@@ -1494,7 +1490,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- mov(ip, Operand(Smi::FromInt(frame_type)));
+ mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1539,21 +1535,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- SmiTag(scratch1, length);
- LoadRoot(scratch2, map_index);
- str(scratch1, FieldMemOperand(string, String::kLengthOffset));
- mov(scratch1, Operand(String::kEmptyHashField));
- str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
// Running on the real platform. Use the alignment as mandated by the local
@@ -1921,17 +1902,17 @@ void MacroAssembler::IsObjectNameType(Register object,
b(hi, fail);
}
-
-void MacroAssembler::DebugBreak() {
- mov(r0, Operand::Zero());
- mov(r1,
- Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
- CEntryStub ces(isolate(), 1);
- DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+ // Check whether we need to drop frames to restart a function on the stack.
+ ExternalReference restart_fp =
+ ExternalReference::debug_restart_fp_address(isolate());
+ mov(r1, Operand(restart_fp));
+ ldr(r1, MemOperand(r1));
+ tst(r1, r1);
+ Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+ ne);
}
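
MaybeDropFrames replaces the old DebugBreak runtime call: it loads the isolate's debug restart-FP slot and, when the slot is non-zero, tail-jumps to the FrameDropperTrampoline builtin. A C-like pseudocode sketch of the emitted sequence (the trampoline's behavior is assumed from its name and the surrounding debug machinery):

```cpp
#include <cstdint>

// Assumed builtin: unwinds every frame above restart_fp and re-enters the
// function being restarted.
extern void FrameDropperTrampoline(intptr_t restart_fp);

// Pseudocode for the sequence MaybeDropFrames emits (not V8 source).
void MaybeDropFrames(intptr_t* debug_restart_fp_address) {
  intptr_t restart_fp = *debug_restart_fp_address;  // mov r1, ...; ldr r1, [r1]
  if (restart_fp != 0) {                            // tst r1, r1
    FrameDropperTrampoline(restart_fp);             // Jump(..., ne)
  }
  // Zero means no restart is pending; fall through to normal execution.
}
```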
-
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
@@ -2430,38 +2411,12 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss) {
- // Get the prototype or initial map from the function.
- ldr(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- cmp(result, ip);
- b(eq, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CompareObjectType(result, scratch, scratch, MAP_TYPE);
- b(ne, &done);
-
- // Get the prototype from the initial map.
- ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- bind(&done);
-}
-
-
void MacroAssembler::CallStub(CodeStub* stub,
TypeFeedbackId ast_id,
Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond,
+ CAN_INLINE_TARGET_ADDRESS, false);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index bda65968b9..821a1096d3 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -107,12 +107,13 @@ class MacroAssembler: public Assembler {
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Register target, Condition cond = al);
- void Call(Address target, RelocInfo::Mode rmode,
- Condition cond = al,
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
+ void Call(Address target, RelocInfo::Mode rmode, Condition cond = al,
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
+ bool check_constant_pool = true);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = al,
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
+ bool check_constant_pool = true);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
@@ -714,12 +715,9 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
+ // Frame restart support
+ void MaybeDropFrames();
- // ---------------------------------------------------------------------------
// Exception handling
// Push a new stack handler and link into stack handler chain.
@@ -834,14 +832,6 @@ class MacroAssembler: public Assembler {
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss);
-
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -1430,12 +1420,6 @@ class MacroAssembler: public Assembler {
InvokeFlag flag,
const CallWrapper& call_wrapper);
- void InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 39e7a8e837..3a3a90225b 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -22,6 +22,10 @@
namespace v8 {
namespace internal {
+// static
+base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
+ LAZY_INSTANCE_INITIALIZER;
+
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
@@ -569,7 +573,6 @@ static bool AllOnOnePage(uintptr_t start, int size) {
return start_page == end_page;
}
-
void Simulator::set_last_debugger_input(char* input) {
DeleteArray(last_debugger_input_);
last_debugger_input_ = input;
@@ -710,9 +713,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
last_debugger_input_ = NULL;
}
-
-Simulator::~Simulator() { free(stack_); }
-
+Simulator::~Simulator() {
+ global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
+ free(stack_);
+}
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
@@ -1040,78 +1044,166 @@ void Simulator::TrashCallerSaveRegisters() {
int Simulator::ReadW(int32_t addr, Instruction* instr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
+int Simulator::ReadExW(int32_t addr, Instruction* instr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
+ global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+ &global_monitor_processor_);
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+}
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
}
+int Simulator::WriteExW(int32_t addr, int value, Instruction* instr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
+ global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ addr, &global_monitor_processor_)) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ *ptr = value;
+ return 0;
+ } else {
+ return 1;
+ }
+}
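
ReadExW/WriteExW model the ldrex/strex pair: the exclusive load arms both monitors, and the exclusive store returns the strex status (0 on success, 1 on failure). Code generated for atomics therefore takes the usual retry-loop shape; here is a hypothetical helper, written against the simulator methods above, showing how the status code is meant to be consumed:

```cpp
// Hypothetical helper (not in this patch): an atomic increment in the
// style of a generated ldrex/strex retry loop. `sim` and `instr` stand in
// for a live Simulator and the current Instruction, as in the code above.
int32_t AtomicIncrement(Simulator* sim, int32_t addr, Instruction* instr) {
  int old_value, status;
  do {
    old_value = sim->ReadExW(addr, instr);               // ldrex
    status = sim->WriteExW(addr, old_value + 1, instr);  // strex: 0 = success
  } while (status != 0);  // retry if the reservation was lost
  return old_value;
}
```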
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
-
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
+uint16_t Simulator::ReadExHU(int32_t addr, Instruction* instr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
+ global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+ &global_monitor_processor_);
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+}
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
}
-
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
}
+int Simulator::WriteExH(int32_t addr, uint16_t value, Instruction* instr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
+ global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ addr, &global_monitor_processor_)) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return 0;
+ } else {
+ return 1;
+ }
+}
uint8_t Simulator::ReadBU(int32_t addr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
return *ptr;
}
-
int8_t Simulator::ReadB(int32_t addr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
return *ptr;
}
+uint8_t Simulator::ReadExBU(int32_t addr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
+ global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+ &global_monitor_processor_);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
void Simulator::WriteB(int32_t addr, uint8_t value) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
*ptr = value;
}
-
void Simulator::WriteB(int32_t addr, int8_t value) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
*ptr = value;
}
+int Simulator::WriteExB(int32_t addr, uint8_t value) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
+ global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ addr, &global_monitor_processor_)) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+ return 0;
+ } else {
+ return 1;
+ }
+}
int32_t* Simulator::ReadDW(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
}
@@ -1120,6 +1212,10 @@ int32_t* Simulator::ReadDW(int32_t addr) {
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
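
All of the plain Read*/Write* bodies above follow one pattern: acquire the single global-monitor mutex, tell the thread's local monitor about the access, and (for stores only) tell every registered processor so remote reservations get invalidated. Condensed into hypothetical member templates (the real tree spells each accessor out per width and signedness):

```cpp
// Member-style pseudocode condensing the pattern above; sketch only.
template <typename T>
T Simulator::SimulatedLoad(int32_t addr) {
  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
  local_monitor_.NotifyLoad(addr);  // a plain load may clear a reservation
  return *reinterpret_cast<T*>(addr);
}

template <typename T>
void Simulator::SimulatedStore(int32_t addr, T value) {
  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
  local_monitor_.NotifyStore(addr);
  // Stores additionally notify every registered processor so reservations
  // held by other simulated threads are invalidated.
  global_monitor_.Pointer()->NotifyStore_Locked(addr,
                                                &global_monitor_processor_);
  *reinterpret_cast<T*>(addr) = value;
}
```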
@@ -2073,7 +2169,72 @@ void Simulator::DecodeType01(Instruction* instr) {
}
}
} else {
- UNIMPLEMENTED(); // Not used by V8.
+ if (instr->Bits(24, 23) == 3) {
+ if (instr->Bit(20) == 1) {
+ // ldrex
+ int rt = instr->RtValue();
+ int rn = instr->RnValue();
+ int32_t addr = get_register(rn);
+ switch (instr->Bits(22, 21)) {
+ case 0: {
+ // Format(instr, "ldrex'cond 'rt, ['rn]");
+ int value = ReadExW(addr, instr);
+ set_register(rt, value);
+ break;
+ }
+ case 2: {
+ // Format(instr, "ldrexb'cond 'rt, ['rn]");
+ uint8_t value = ReadExBU(addr);
+ set_register(rt, value);
+ break;
+ }
+ case 3: {
+ // Format(instr, "ldrexh'cond 'rt, ['rn]");
+ uint16_t value = ReadExHU(addr, instr);
+ set_register(rt, value);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ // The instruction is documented as strex rd, rt, [rn], but the
+            // "rt" register is encoded in the rm bits.
+ int rd = instr->RdValue();
+ int rt = instr->RmValue();
+ int rn = instr->RnValue();
+ int32_t addr = get_register(rn);
+ switch (instr->Bits(22, 21)) {
+ case 0: {
+ // Format(instr, "strex'cond 'rd, 'rm, ['rn]");
+ int value = get_register(rt);
+ int status = WriteExW(addr, value, instr);
+ set_register(rd, status);
+ break;
+ }
+ case 2: {
+ // Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
+ uint8_t value = get_register(rt);
+ int status = WriteExB(addr, value);
+ set_register(rd, status);
+ break;
+ }
+ case 3: {
+ // Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
+ uint16_t value = get_register(rt);
+ int status = WriteExH(addr, value, instr);
+ set_register(rd, status);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
}
} else {
// extra load/store instructions
@@ -3827,61 +3988,177 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
}
+// Templated operations for NEON instructions.
+// TODO(bbudge) Add more templates for use in DecodeSpecialCondition.
+template <typename T>
+int64_t Widen(T value) {
+ static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
+ return static_cast<int64_t>(value);
+}
+
+template <typename T>
+T Clamp(int64_t value) {
+ static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
+ int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
+ int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
+ int64_t clamped = std::max(min, std::min(max, value));
+ return static_cast<T>(clamped);
+}
+
+template <typename T>
+void AddSaturate(Simulator* simulator, int Vd, int Vm, int Vn) {
+ static const int kLanes = 16 / sizeof(T);
+ T src1[kLanes], src2[kLanes];
+ simulator->get_q_register(Vn, src1);
+ simulator->get_q_register(Vm, src2);
+ for (int i = 0; i < kLanes; i++) {
+ src1[i] = Clamp<T>(Widen(src1[i]) + Widen(src2[i]));
+ }
+ simulator->set_q_register(Vd, src1);
+}
+
+template <typename T>
+void SubSaturate(Simulator* simulator, int Vd, int Vm, int Vn) {
+ static const int kLanes = 16 / sizeof(T);
+ T src1[kLanes], src2[kLanes];
+ simulator->get_q_register(Vn, src1);
+ simulator->get_q_register(Vm, src2);
+ for (int i = 0; i < kLanes; i++) {
+ src1[i] = Clamp<T>(Widen(src1[i]) - Widen(src2[i]));
+ }
+ simulator->set_q_register(Vd, src1);
+}
+
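
Widen and Clamp implement lane saturation by computing in int64_t and clamping back to the lane type's range, so vqadd/vqsub pin at the type bounds instead of wrapping. A standalone restatement with a worked check:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>

// Standalone restatement of the Widen/Clamp saturation logic above.
template <typename T>
T SaturatingAdd(T a, T b) {
  int64_t wide = static_cast<int64_t>(a) + static_cast<int64_t>(b);
  int64_t min = std::numeric_limits<T>::min();
  int64_t max = std::numeric_limits<T>::max();
  return static_cast<T>(std::max(min, std::min(max, wide)));
}

int main() {
  // vqadd.s8 semantics: the result saturates instead of wrapping.
  assert(SaturatingAdd<int8_t>(100, 100) == 127);   // wrapping would give -56
  assert(SaturatingAdd<uint8_t>(200, 100) == 255);  // unsigned saturation
  return 0;
}
```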
void Simulator::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
- case 4:
- if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 2 &&
- instr->Bit(4) == 1) {
- // vmov Qd, Qm.
- // vorr, Qd, Qm, Qn.
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- uint32_t src1[4];
- get_q_register(Vm, src1);
- if (Vm != Vn) {
- uint32_t src2[4];
- get_q_register(Vn, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] | src2[i];
+ case 4: {
+ int Vd, Vm, Vn;
+ if (instr->Bit(6) == 0) {
+ Vd = instr->VFPDRegValue(kDoublePrecision);
+ Vm = instr->VFPMRegValue(kDoublePrecision);
+ Vn = instr->VFPNRegValue(kDoublePrecision);
+ } else {
+ Vd = instr->VFPDRegValue(kSimd128Precision);
+ Vm = instr->VFPMRegValue(kSimd128Precision);
+ Vn = instr->VFPNRegValue(kSimd128Precision);
+ }
+ switch (instr->Bits(11, 8)) {
+ case 0x0: {
+ if (instr->Bit(4) == 1) {
+ // vqadd.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ AddSaturate<int8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ AddSaturate<int16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ AddSaturate<int32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ UNIMPLEMENTED();
}
+ break;
}
- set_q_register(Vd, src1);
- } else if (instr->Bits(11, 8) == 8) {
- // vadd/vtst
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- if (instr->Bit(4) == 0) {
- // vadd.i<size> Qd, Qm, Qn.
+ case 0x1: {
+ if (instr->Bits(21, 20) == 2 && instr->Bit(6) == 1 &&
+ instr->Bit(4) == 1) {
+ // vmov Qd, Qm.
+ // vorr, Qd, Qm, Qn.
+ uint32_t src1[4];
+ get_q_register(Vm, src1);
+ if (Vm != Vn) {
+ uint32_t src2[4];
+ get_q_register(Vn, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] | src2[i];
+ }
+ }
+ set_q_register(Vd, src1);
+ } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
+ instr->Bit(4) == 1) {
+ // vand Qd, Qm, Qn.
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] & src2[i];
+ }
+ set_q_register(Vd, src1);
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ }
+ case 0x2: {
+ if (instr->Bit(4) == 1) {
+ // vqsub.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ SubSaturate<int8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ SubSaturate<int16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ SubSaturate<int32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ }
+ case 0x3: {
+ // vcge/vcgt.s<size> Qd, Qm, Qn.
+ bool ge = instr->Bit(4) == 1;
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
switch (size) {
case Neon8: {
- uint8_t src1[16], src2[16];
+ int8_t src1[16], src2[16];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
for (int i = 0; i < 16; i++) {
- src1[i] += src2[i];
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFF : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFF : 0;
}
set_q_register(Vd, src1);
break;
}
case Neon16: {
- uint16_t src1[8], src2[8];
+ int16_t src1[8], src2[8];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
for (int i = 0; i < 8; i++) {
- src1[i] += src2[i];
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFFFF : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFFFF : 0;
}
set_q_register(Vd, src1);
break;
}
case Neon32: {
- uint32_t src1[4], src2[4];
+ int32_t src1[4], src2[4];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
for (int i = 0; i < 4; i++) {
- src1[i] += src2[i];
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFFFFFFFF : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFFFFFFFF : 0;
}
set_q_register(Vd, src1);
break;
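
Switching the vcge/vcgt lanes to signed element types does double duty: the comparison becomes signed, and the all-ones result mask falls out of the narrowing conversion, since storing 0xFF into an int8_t lane yields the bit pattern 0b11111111 (value -1 on the two's-complement targets V8 supports). A tiny standalone check:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // The comparison results above rely on narrowing 0xFF/0xFFFF/0xFFFFFFFF
  // into a signed lane: the stored bit pattern is all ones, read back as -1.
  int8_t mask = static_cast<int8_t>(0xFF);
  assert(mask == -1);
  // Reinterpreted as unsigned, every bit is set:
  assert(static_cast<uint8_t>(mask) == 0xFFu);
  return 0;
}
```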
@@ -3890,35 +4167,48 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
UNREACHABLE();
break;
}
- } else {
- // vtst.i<size> Qd, Qm, Qn.
+ break;
+ }
+ case 0x6: {
+ // vmin/vmax.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ bool min = instr->Bit(4) != 0;
switch (size) {
case Neon8: {
- uint8_t src1[16], src2[16];
+ int8_t src1[16], src2[16];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
for (int i = 0; i < 16; i++) {
- src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFu : 0;
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
}
set_q_register(Vd, src1);
break;
}
case Neon16: {
- uint16_t src1[8], src2[8];
+ int16_t src1[8], src2[8];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
for (int i = 0; i < 8; i++) {
- src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFFFu : 0;
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
}
set_q_register(Vd, src1);
break;
}
case Neon32: {
- uint32_t src1[4], src2[4];
+ int32_t src1[4], src2[4];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
for (int i = 0; i < 4; i++) {
- src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFFFFFFFu : 0;
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
}
set_q_register(Vd, src1);
break;
@@ -3927,234 +4217,210 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
UNREACHABLE();
break;
}
+ break;
}
- } else if (instr->Bits(11, 8) == 0xd && instr->Bit(20) == 0 &&
- instr->Bit(4) == 0) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- float src1[4], src2[4];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- if (instr->Bit(21) == 0) {
- // vadd.f32 Qd, Qm, Qn.
- src1[i] = src1[i] + src2[i];
+ case 0x8: {
+ // vadd/vtst
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ if (instr->Bit(4) == 0) {
+ // vadd.i<size> Qd, Qm, Qn.
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ src1[i] += src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ src1[i] += src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] += src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
} else {
- // vsub.f32 Qd, Qm, Qn.
- src1[i] = src1[i] - src2[i];
- }
- }
- set_q_register(Vd, src1);
- } else if (instr->Bits(11, 8) == 0x9 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vmul.i<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- switch (size) {
- case Neon8: {
- uint8_t src1[16], src2[16];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 16; i++) {
- src1[i] *= src2[i];
+ // vtst.i<size> Qd, Qm, Qn.
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFFFFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- set_q_register(Vd, src1);
- break;
}
- case Neon16: {
- uint16_t src1[8], src2[8];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 8; i++) {
- src1[i] *= src2[i];
+ break;
+ }
+ case 0x9: {
+ if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+ // vmul.i<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ src1[i] *= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ src1[i] *= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] *= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- set_q_register(Vd, src1);
- break;
+ } else {
+ UNIMPLEMENTED();
}
- case Neon32: {
- uint32_t src1[4], src2[4];
+ break;
+ }
+ case 0xd: {
+ if (instr->Bit(4) == 0) {
+ float src1[4], src2[4];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
for (int i = 0; i < 4; i++) {
- src1[i] *= src2[i];
+ if (instr->Bit(21) == 0) {
+ // vadd.f32 Qd, Qm, Qn.
+ src1[i] = src1[i] + src2[i];
+ } else {
+ // vsub.f32 Qd, Qm, Qn.
+ src1[i] = src1[i] - src2[i];
+ }
}
set_q_register(Vd, src1);
- break;
- }
- default:
+ } else {
UNIMPLEMENTED();
- break;
- }
- } else if (instr->Bits(11, 8) == 0xe && instr->Bits(21, 20) == 0 &&
- instr->Bit(4) == 0) {
- // vceq.f32.
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- float src1[4], src2[4];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- uint32_t dst[4];
- for (int i = 0; i < 4; i++) {
- dst[i] = (src1[i] == src2[i]) ? 0xFFFFFFFF : 0;
- }
- set_q_register(Vd, dst);
- } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
- instr->Bit(6) == 1 && instr->Bit(4) == 1) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- // vand Qd, Qm, Qn.
- uint32_t src1[4], src2[4];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] & src2[i];
- }
- set_q_register(Vd, src1);
- } else if (instr->Bits(11, 8) == 0x3) {
- // vcge/vcgt.s<size> Qd, Qm, Qn.
- bool ge = instr->Bit(4) == 1;
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- switch (size) {
- case Neon8: {
- int8_t src1[16], src2[16];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 16; i++) {
- if (ge)
- src1[i] = src1[i] >= src2[i] ? 0xFF : 0;
- else
- src1[i] = src1[i] > src2[i] ? 0xFF : 0;
- }
- set_q_register(Vd, src1);
- break;
}
- case Neon16: {
- int16_t src1[8], src2[8];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 8; i++) {
- if (ge)
- src1[i] = src1[i] >= src2[i] ? 0xFFFF : 0;
- else
- src1[i] = src1[i] > src2[i] ? 0xFFFF : 0;
- }
- set_q_register(Vd, src1);
- break;
- }
- case Neon32: {
- int32_t src1[4], src2[4];
+ break;
+ }
+ case 0xe: {
+ if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
+ // vceq.f32.
+ float src1[4], src2[4];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
+ uint32_t dst[4];
for (int i = 0; i < 4; i++) {
- if (ge)
- src1[i] = src1[i] >= src2[i] ? 0xFFFFFFFF : 0;
- else
- src1[i] = src1[i] > src2[i] ? 0xFFFFFFFF : 0;
- }
- set_q_register(Vd, src1);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else if (instr->Bits(11, 8) == 0xf && instr->Bit(20) == 0 &&
- instr->Bit(6) == 1) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- float src1[4], src2[4];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- if (instr->Bit(4) == 1) {
- if (instr->Bit(21) == 0) {
- // vrecps.f32 Qd, Qm, Qn.
- for (int i = 0; i < 4; i++) {
- src1[i] = 2.0f - src1[i] * src2[i];
- }
- } else {
- // vrsqrts.f32 Qd, Qm, Qn.
- for (int i = 0; i < 4; i++) {
- src1[i] = (3.0f - src1[i] * src2[i]) * 0.5f;
- }
- }
- } else {
- if (instr->Bit(21) == 1) {
- // vmin.f32 Qd, Qm, Qn.
- for (int i = 0; i < 4; i++) {
- src1[i] = std::min(src1[i], src2[i]);
+ dst[i] = (src1[i] == src2[i]) ? 0xFFFFFFFF : 0;
}
+ set_q_register(Vd, dst);
} else {
- // vmax.f32 Qd, Qm, Qn.
- for (int i = 0; i < 4; i++) {
- src1[i] = std::max(src1[i], src2[i]);
- }
+ UNIMPLEMENTED();
}
+ break;
}
- set_q_register(Vd, src1);
- } else if (instr->Bits(11, 8) == 0x6) {
- // vmin/vmax.s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- bool min = instr->Bit(4) != 0;
- switch (size) {
- case Neon8: {
- int8_t src1[16], src2[16];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 16; i++) {
- if (min)
- src1[i] = std::min(src1[i], src2[i]);
- else
- src1[i] = std::max(src1[i], src2[i]);
- }
- set_q_register(Vd, src1);
- break;
- }
- case Neon16: {
- int16_t src1[8], src2[8];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 8; i++) {
- if (min)
- src1[i] = std::min(src1[i], src2[i]);
- else
- src1[i] = std::max(src1[i], src2[i]);
- }
- set_q_register(Vd, src1);
- break;
- }
- case Neon32: {
- int32_t src1[4], src2[4];
+ case 0xf: {
+ if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
+ float src1[4], src2[4];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- if (min)
- src1[i] = std::min(src1[i], src2[i]);
- else
- src1[i] = std::max(src1[i], src2[i]);
+ if (instr->Bit(4) == 1) {
+ if (instr->Bit(21) == 0) {
+ // vrecps.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = 2.0f - src1[i] * src2[i];
+ }
+ } else {
+ // vrsqrts.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = (3.0f - src1[i] * src2[i]) * 0.5f;
+ }
+ }
+ } else {
+ if (instr->Bit(21) == 1) {
+ // vmin.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = std::min(src1[i], src2[i]);
+ }
+ } else {
+ // vmax.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ }
}
set_q_register(Vd, src1);
- break;
+ } else {
+ UNIMPLEMENTED();
}
- default:
- UNREACHABLE();
- break;
+ break;
}
- } else {
- UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED();
+ break;
}
break;
+ }
case 5:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
@@ -4193,269 +4459,401 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
dst[i] = src2[i - boundary];
}
set_q_register(Vd, dst);
- } else {
- UNIMPLEMENTED();
- }
- break;
- case 6:
- if (instr->Bits(11, 8) == 8 && instr->Bit(4) == 0) {
- // vsub.size Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ } else if (instr->Bits(11, 7) == 0xA && instr->Bit(4) == 1) {
+ // vshl.i<size> Qd, Qm, shift
+ int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+ int shift = instr->Bits(21, 16) - size;
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- switch (size) {
+ NeonSize ns = static_cast<NeonSize>(size / 16);
+ switch (ns) {
case Neon8: {
- uint8_t src1[16], src2[16];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
+ uint8_t src[16];
+ get_q_register(Vm, src);
for (int i = 0; i < 16; i++) {
- src1[i] -= src2[i];
+ src[i] <<= shift;
}
- set_q_register(Vd, src1);
+ set_q_register(Vd, src);
break;
}
case Neon16: {
- uint16_t src1[8], src2[8];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
+ uint16_t src[8];
+ get_q_register(Vm, src);
for (int i = 0; i < 8; i++) {
- src1[i] -= src2[i];
+ src[i] <<= shift;
}
- set_q_register(Vd, src1);
+ set_q_register(Vd, src);
break;
}
case Neon32: {
- uint32_t src1[4], src2[4];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
+ uint32_t src[4];
+ get_q_register(Vm, src);
for (int i = 0; i < 4; i++) {
- src1[i] -= src2[i];
+ src[i] <<= shift;
}
- set_q_register(Vd, src1);
+ set_q_register(Vd, src);
break;
}
default:
UNREACHABLE();
break;
}
- } else if (instr->Bits(11, 8) == 8 && instr->Bit(4) == 1) {
- // vceq.size Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ } else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
+ // vshr.s<size> Qd, Qm, shift
+ int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+ int shift = 2 * size - instr->Bits(21, 16);
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- switch (size) {
+ NeonSize ns = static_cast<NeonSize>(size / 16);
+ switch (ns) {
case Neon8: {
- uint8_t src1[16], src2[16];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
+ int8_t src[16];
+ get_q_register(Vm, src);
for (int i = 0; i < 16; i++) {
- src1[i] = (src1[i] == src2[i]) ? 0xFFu : 0;
+ src[i] = ArithmeticShiftRight(src[i], shift);
}
- set_q_register(Vd, src1);
+ set_q_register(Vd, src);
break;
}
case Neon16: {
- uint16_t src1[8], src2[8];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
+ int16_t src[8];
+ get_q_register(Vm, src);
for (int i = 0; i < 8; i++) {
- src1[i] = (src1[i] == src2[i]) ? 0xFFFFu : 0;
+ src[i] = ArithmeticShiftRight(src[i], shift);
}
- set_q_register(Vd, src1);
+ set_q_register(Vd, src);
break;
}
case Neon32: {
- uint32_t src1[4], src2[4];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
+ int32_t src[4];
+ get_q_register(Vm, src);
for (int i = 0; i < 4; i++) {
- src1[i] = (src1[i] == src2[i]) ? 0xFFFFFFFFu : 0;
+ src[i] = ArithmeticShiftRight(src[i], shift);
}
- set_q_register(Vd, src1);
+ set_q_register(Vd, src);
break;
}
default:
UNREACHABLE();
break;
}
- } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 1 &&
- instr->Bit(4) == 1) {
- // vbsl.size Qd, Qm, Qn.
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- uint32_t dst[4], src1[4], src2[4];
- get_q_register(Vd, dst);
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- dst[i] = (dst[i] & src1[i]) | (~dst[i] & src2[i]);
- }
- set_q_register(Vd, dst);
- } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
- instr->Bit(4) == 1) {
- if (instr->Bit(6) == 0) {
- // veor Dd, Dn, Dm
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- uint64_t src1, src2;
- get_d_register(Vn, &src1);
- get_d_register(Vm, &src2);
- src1 ^= src2;
- set_d_register(Vd, &src1);
-
- } else {
- // veor Qd, Qn, Qm
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- uint32_t src1[4], src2[4];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 4; i++) src1[i] ^= src2[i];
- set_q_register(Vd, src1);
- }
- } else if (instr->Bits(11, 8) == 0xd && instr->Bit(21) == 0 &&
- instr->Bit(6) == 1 && instr->Bit(4) == 1) {
- // vmul.f32 Qd, Qn, Qm
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- float src1[4], src2[4];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] * src2[i];
- }
- set_q_register(Vd, src1);
- } else if (instr->Bits(11, 8) == 0xe && instr->Bit(20) == 0 &&
- instr->Bit(4) == 0) {
- // vcge/vcgt.f32 Qd, Qm, Qn
- bool ge = instr->Bit(21) == 0;
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- float src1[4], src2[4];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- uint32_t dst[4];
- for (int i = 0; i < 4; i++) {
- if (ge) {
- dst[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 6: {
+ int Vd, Vm, Vn;
+ if (instr->Bit(6) == 0) {
+ Vd = instr->VFPDRegValue(kDoublePrecision);
+ Vm = instr->VFPMRegValue(kDoublePrecision);
+ Vn = instr->VFPNRegValue(kDoublePrecision);
+ } else {
+ Vd = instr->VFPDRegValue(kSimd128Precision);
+ Vm = instr->VFPMRegValue(kSimd128Precision);
+ Vn = instr->VFPNRegValue(kSimd128Precision);
+ }
+ switch (instr->Bits(11, 8)) {
+ case 0x0: {
+ if (instr->Bit(4) == 1) {
+ // vqadd.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ AddSaturate<uint8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ AddSaturate<uint16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ AddSaturate<uint32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
} else {
- dst[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
+ UNIMPLEMENTED();
}
+ break;
}
- set_q_register(Vd, dst);
- } else if (instr->Bits(11, 8) == 0x3) {
- // vcge/vcgt.u<size> Qd, Qm, Qn.
- bool ge = instr->Bit(4) == 1;
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- switch (size) {
- case Neon8: {
- uint8_t src1[16], src2[16];
+ case 0x1: {
+ if (instr->Bits(21, 20) == 1 && instr->Bit(4) == 1) {
+ // vbsl.size Qd, Qm, Qn.
+ uint32_t dst[4], src1[4], src2[4];
+ get_q_register(Vd, dst);
get_q_register(Vn, src1);
get_q_register(Vm, src2);
- for (int i = 0; i < 16; i++) {
- if (ge)
- src1[i] = src1[i] >= src2[i] ? 0xFFu : 0;
- else
- src1[i] = src1[i] > src2[i] ? 0xFFu : 0;
+ for (int i = 0; i < 4; i++) {
+ dst[i] = (dst[i] & src1[i]) | (~dst[i] & src2[i]);
}
- set_q_register(Vd, src1);
- break;
+ set_q_register(Vd, dst);
+ } else if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 1) {
+ if (instr->Bit(6) == 0) {
+ // veor Dd, Dn, Dm
+ uint64_t src1, src2;
+ get_d_register(Vn, &src1);
+ get_d_register(Vm, &src2);
+ src1 ^= src2;
+ set_d_register(Vd, &src1);
+
+ } else {
+ // veor Qd, Qn, Qm
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) src1[i] ^= src2[i];
+ set_q_register(Vd, src1);
+ }
+ } else {
+ UNIMPLEMENTED();
}
- case Neon16: {
- uint16_t src1[8], src2[8];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 8; i++) {
- if (ge)
- src1[i] = src1[i] >= src2[i] ? 0xFFFFu : 0;
- else
- src1[i] = src1[i] > src2[i] ? 0xFFFFu : 0;
+ break;
+ }
+ case 0x2: {
+ if (instr->Bit(4) == 1) {
+ // vqsub.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ SubSaturate<uint8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ SubSaturate<uint16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ SubSaturate<uint32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
- set_q_register(Vd, src1);
- break;
+ } else {
+ UNIMPLEMENTED();
}
- case Neon32: {
- uint32_t src1[4], src2[4];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- if (ge)
- src1[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
- else
- src1[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
+ break;
+ }
+ case 0x3: {
+ // vcge/vcgt.u<size> Qd, Qm, Qn.
+ bool ge = instr->Bit(4) == 1;
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFFu : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
}
- set_q_register(Vd, src1);
- break;
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFFFFu : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- default:
- UNREACHABLE();
- break;
+ break;
}
- } else if (instr->Bits(11, 8) == 0x6) {
- // vmin/vmax.u<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- bool min = instr->Bit(4) != 0;
- switch (size) {
- case Neon8: {
- uint8_t src1[16], src2[16];
- get_q_register(Vn, src1);
- get_q_register(Vm, src2);
- for (int i = 0; i < 16; i++) {
- if (min)
- src1[i] = std::min(src1[i], src2[i]);
- else
- src1[i] = std::max(src1[i], src2[i]);
+ case 0x6: {
+ // vmin/vmax.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ set_q_register(Vd, src1);
+ break;
}
- set_q_register(Vd, src1);
- break;
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- case Neon16: {
- uint16_t src1[8], src2[8];
+ break;
+ }
+ case 0x8: {
+ if (instr->Bit(4) == 0) {
+ // vsub.size Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ src1[i] -= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ src1[i] -= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] -= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ // vceq.size Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ src1[i] = (src1[i] == src2[i]) ? 0xFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ src1[i] = (src1[i] == src2[i]) ? 0xFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = (src1[i] == src2[i]) ? 0xFFFFFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ break;
+ }
+ case 0xd: {
+ if (instr->Bit(21) == 0 && instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+ // vmul.f32 Qd, Qn, Qm
+ float src1[4], src2[4];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
- for (int i = 0; i < 8; i++) {
- if (min)
- src1[i] = std::min(src1[i], src2[i]);
- else
- src1[i] = std::max(src1[i], src2[i]);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] * src2[i];
}
set_q_register(Vd, src1);
- break;
+ } else {
+ UNIMPLEMENTED();
}
- case Neon32: {
- uint32_t src1[4], src2[4];
+ break;
+ }
+ case 0xe: {
+ if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
+ // vcge/vcgt.f32 Qd, Qm, Qn
+ bool ge = instr->Bit(21) == 0;
+ float src1[4], src2[4];
get_q_register(Vn, src1);
get_q_register(Vm, src2);
+ uint32_t dst[4];
for (int i = 0; i < 4; i++) {
- if (min)
- src1[i] = std::min(src1[i], src2[i]);
- else
- src1[i] = std::max(src1[i], src2[i]);
+ if (ge) {
+ dst[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
+ } else {
+ dst[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
+ }
}
- set_q_register(Vd, src1);
- break;
+ set_q_register(Vd, dst);
+ } else {
+ UNIMPLEMENTED();
}
- default:
- UNREACHABLE();
- break;
+ break;
}
- } else {
- UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ break;
}
break;
+ }
case 7:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
@@ -4831,6 +5229,45 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
} else {
UNIMPLEMENTED();
}
+ } else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
+ // vshr.u<size> Qd, Qm, shift
+ int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+ int shift = 2 * size - instr->Bits(21, 16);
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ NeonSize ns = static_cast<NeonSize>(size / 16);
+ switch (ns) {
+ case Neon8: {
+ uint8_t src[16];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 16; i++) {
+ src[i] >>= shift;
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ case Neon16: {
+ uint16_t src[8];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 8; i++) {
+ src[i] >>= shift;
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ case Neon32: {
+ uint32_t src[4];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ src[i] >>= shift;
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
} else {
UNIMPLEMENTED();
}
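
The vshl and vshr cases above share one immediate decode: the imm6 field (instruction bits 21:16) encodes both the lane size, as the largest power of two not exceeding imm6 (8, 16, or 32), and the shift amount, as imm6 - size for left shifts and 2 * size - imm6 for right shifts. A standalone restatement with a worked value (the power-of-two helper is stubbed for the valid immediate range):

```cpp
#include <cassert>

// Stub for base::bits::RoundDownToPowerOfTwo32 over the valid imm6 range.
static int RoundDownToPowerOfTwo(int imm6) {
  if (imm6 >= 32) return 32;
  if (imm6 >= 16) return 16;
  return 8;  // valid shift immediates are always >= 8
}

int main() {
  int imm6 = 20;  // instr->Bits(21, 16)
  int size = RoundDownToPowerOfTwo(imm6);
  assert(size == 16);            // Neon16 lanes
  assert(imm6 - size == 4);      // vshl.i16 ..., #4
  assert(2 * size - imm6 == 12); // vshr.s16 / vshr.u16 ..., #12
  return 0;
}
```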
@@ -5345,6 +5782,207 @@ uintptr_t Simulator::PopAddress() {
return address;
}
+Simulator::LocalMonitor::LocalMonitor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+ size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad(int32_t addr) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+    // A load could cause a cache eviction which will affect the monitor. As a
+    // result, the strictest behavior is to unconditionally clear the local
+    // monitor on load.
+ Clear();
+ }
+}
+
+void Simulator::LocalMonitor::NotifyLoadExcl(int32_t addr,
+ TransactionSize size) {
+ access_state_ = MonitorAccess::Exclusive;
+ tagged_addr_ = addr;
+ size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore(int32_t addr) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ // It is implementation-defined whether a non-exclusive store to an address
+ // covered by the local monitor during exclusive access transitions to open
+ // or exclusive access. See ARM DDI 0406C.b, A3.4.1.
+ //
+ // However, a store could cause a cache eviction which will affect the
+    // monitor. As a result, the strictest behavior is to unconditionally
+    // clear the local monitor on store.
+ Clear();
+ }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreExcl(int32_t addr,
+ TransactionSize size) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ // It is allowed for a processor to require that the address matches
+ // exactly (A3.4.5), so this comparison does not mask addr.
+ if (addr == tagged_addr_ && size_ == size) {
+ Clear();
+ return true;
+ } else {
+ // It is implementation-defined whether an exclusive store to a
+ // non-tagged address will update memory. Behavior is unpredictable if
+ // the transaction size of the exclusive store differs from that of the
+ // exclusive load. See ARM DDI 0406C.b, A3.4.5.
+ Clear();
+ return false;
+ }
+ } else {
+ DCHECK(access_state_ == MonitorAccess::Open);
+ return false;
+ }
+}
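
Together these four notifications form the local monitor's state machine: NotifyLoadExcl arms a reservation for one address and transaction size, any other access conservatively clears it, and NotifyStoreExcl reports success only while the reservation is still armed with a matching address and size. A usage sketch (LocalMonitor and TransactionSize are private nested types in the tree, so treat this as illustrative rather than compilable test code):

```cpp
// Illustrative driver for the reservation state machine above.
Simulator::LocalMonitor m;

m.NotifyLoadExcl(0x1000, Simulator::TransactionSize::Word);  // ldrex arms it
bool ok = m.NotifyStoreExcl(0x1000, Simulator::TransactionSize::Word);
// ok == true: address and size match, reservation consumed.

m.NotifyLoadExcl(0x1000, Simulator::TransactionSize::Word);
m.NotifyStore(0x2000);  // any plain store conservatively clears the monitor
ok = m.NotifyStoreExcl(0x1000, Simulator::TransactionSize::Word);
// ok == false: the strex must be retried.
```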
+
+Simulator::GlobalMonitor::Processor::Processor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ next_(nullptr),
+ prev_(nullptr),
+ failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::Processor::Clear_Locked() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+}
+
+void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(int32_t addr) {
+ access_state_ = MonitorAccess::Exclusive;
+ tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
+ int32_t addr, bool is_requesting_processor) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ // It is implementation-defined whether a non-exclusive store by the
+ // requesting processor to an address covered by the global monitor
+ // during exclusive access transitions to open or exclusive access.
+ //
+ // For any other processor, the access state always transitions to open
+ // access.
+ //
+ // See ARM DDI 0406C.b, A3.4.2.
+ //
+ // However, similar to the local monitor, it is possible that a store
+    // caused a cache eviction, which can affect the monitor, so
+ // conservatively, we always clear the monitor.
+ Clear_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
+ int32_t addr, bool is_requesting_processor) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ if (is_requesting_processor) {
+ // It is allowed for a processor to require that the address matches
+ // exactly (A3.4.5), so this comparison does not mask addr.
+ if (addr == tagged_addr_) {
+ // The access state for the requesting processor after a successful
+ // exclusive store is implementation-defined, but according to the ARM
+ // DDI, this has no effect on the subsequent operation of the global
+ // monitor.
+ Clear_Locked();
+ // Introduce occasional strex failures. This is to simulate the
+ // behavior of hardware, which can randomly fail due to background
+ // cache evictions.
+ if (failure_counter_++ >= kMaxFailureCounter) {
+ failure_counter_ = 0;
+ return false;
+ } else {
+ return true;
+ }
+ }
+ } else if ((addr & kExclusiveTaggedAddrMask) ==
+ (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+ // Check the masked addresses when responding to a successful lock by
+ // another processor so the implementation is more conservative (i.e. the
+      // granularity of locking is as large as possible).
+ Clear_Locked();
+ return false;
+ }
+ }
+ return false;
+}
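
The failure counter gives exclusive stores a deterministic spurious-failure schedule: with kMaxFailureCounter = 5, five correctly tagged strex operations succeed and the sixth fails and resets the counter, so generated ldrex/strex retry loops get exercised even under the simulator. A standalone restatement of the schedule:

```cpp
#include <cassert>

// Restatement of the failure-counter logic above: every sixth exclusive
// store to a correctly tagged address fails spuriously.
int main() {
  const int kMaxFailureCounter = 5;
  int failure_counter = 0;
  int failures = 0;
  for (int attempt = 0; attempt < 12; attempt++) {
    bool success;
    if (failure_counter++ >= kMaxFailureCounter) {
      failure_counter = 0;
      success = false;
    } else {
      success = true;
    }
    if (!success) failures++;
  }
  assert(failures == 2);  // the 6th and 12th attempts fail; retry loops hide this
  return 0;
}
```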
+
+Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
+
+void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(int32_t addr,
+ Processor* processor) {
+ processor->NotifyLoadExcl_Locked(addr);
+ PrependProcessor_Locked(processor);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(int32_t addr,
+ Processor* processor) {
+ // Notify each processor of the store operation.
+ for (Processor* iter = head_; iter; iter = iter->next_) {
+ bool is_requesting_processor = iter == processor;
+ iter->NotifyStore_Locked(addr, is_requesting_processor);
+ }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(int32_t addr,
+ Processor* processor) {
+ DCHECK(IsProcessorInLinkedList_Locked(processor));
+ if (processor->NotifyStoreExcl_Locked(addr, true)) {
+ // Notify the other processors that this StoreExcl succeeded.
+ for (Processor* iter = head_; iter; iter = iter->next_) {
+ if (iter != processor) {
+ iter->NotifyStoreExcl_Locked(addr, false);
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+ Processor* processor) const {
+ return head_ == processor || processor->next_ || processor->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
+ if (IsProcessorInLinkedList_Locked(processor)) {
+ return;
+ }
+
+ if (head_) {
+ head_->prev_ = processor;
+ }
+ processor->prev_ = nullptr;
+ processor->next_ = head_;
+ head_ = processor;
+}
+
+void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
+ base::LockGuard<base::Mutex> lock_guard(&mutex);
+ if (!IsProcessorInLinkedList_Locked(processor)) {
+ return;
+ }
+
+ if (processor->prev_) {
+ processor->prev_->next_ = processor->next_;
+ } else {
+ head_ = processor->next_;
+ }
+ if (processor->next_) {
+ processor->next_->prev_ = processor->prev_;
+ }
+ processor->prev_ = nullptr;
+ processor->next_ = nullptr;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 48c2d0f44a..39d9b7f65c 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -14,6 +14,8 @@
#define V8_ARM_SIMULATOR_ARM_H_
#include "src/allocation.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native arm platform.
@@ -302,19 +304,27 @@ class Simulator {
void PrintStopInfo(uint32_t code);
// Read and write memory.
+  // The *Ex functions perform exclusive accesses. The writes return the
+  // strex status: 0 if the write succeeds, and 1 if it fails.
inline uint8_t ReadBU(int32_t addr);
inline int8_t ReadB(int32_t addr);
+ uint8_t ReadExBU(int32_t addr);
inline void WriteB(int32_t addr, uint8_t value);
inline void WriteB(int32_t addr, int8_t value);
+ int WriteExB(int32_t addr, uint8_t value);
inline uint16_t ReadHU(int32_t addr, Instruction* instr);
inline int16_t ReadH(int32_t addr, Instruction* instr);
+ uint16_t ReadExHU(int32_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
+ int WriteExH(int32_t addr, uint16_t value, Instruction* instr);
inline int ReadW(int32_t addr, Instruction* instr);
+ int ReadExW(int32_t addr, Instruction* instr);
inline void WriteW(int32_t addr, int value, Instruction* instr);
+ int WriteExW(int32_t addr, int value, Instruction* instr);
int32_t* ReadDW(int32_t addr);
void WriteDW(int32_t addr, int32_t value1, int32_t value2);
@@ -437,6 +447,94 @@ class Simulator {
char* desc;
};
StopCountAndDesc watched_stops_[kNumOfWatchedStops];
+
+  // Synchronization primitives. See ARM DDI 0406C.b, A2.9.
+ enum class MonitorAccess {
+ Open,
+ Exclusive,
+ };
+
+ enum class TransactionSize {
+ None = 0,
+ Byte = 1,
+ HalfWord = 2,
+ Word = 4,
+ };
+
+ // The least-significant bits of the address are ignored. The number of bits
+ // is implementation-defined, between 3 and 11. See ARM DDI 0406C.b, A3.4.3.
+ static const int32_t kExclusiveTaggedAddrMask = ~((1 << 11) - 1);
+
+ class LocalMonitor {
+ public:
+ LocalMonitor();
+
+ // These functions manage the state machine for the local monitor, but do
+ // not actually perform loads and stores. NotifyStoreExcl only returns
+ // true if the exclusive store is allowed; the global monitor will still
+ // have to be checked to see whether the memory should be updated.
+ void NotifyLoad(int32_t addr);
+ void NotifyLoadExcl(int32_t addr, TransactionSize size);
+ void NotifyStore(int32_t addr);
+ bool NotifyStoreExcl(int32_t addr, TransactionSize size);
+
+ private:
+ void Clear();
+
+ MonitorAccess access_state_;
+ int32_t tagged_addr_;
+ TransactionSize size_;
+ };
+
+ class GlobalMonitor {
+ public:
+ GlobalMonitor();
+
+ class Processor {
+ public:
+ Processor();
+
+ private:
+ friend class GlobalMonitor;
+ // These functions manage the state machine for the global monitor, but do
+ // not actually perform loads and stores.
+ void Clear_Locked();
+ void NotifyLoadExcl_Locked(int32_t addr);
+ void NotifyStore_Locked(int32_t addr, bool is_requesting_processor);
+ bool NotifyStoreExcl_Locked(int32_t addr, bool is_requesting_processor);
+
+ MonitorAccess access_state_;
+ int32_t tagged_addr_;
+ Processor* next_;
+ Processor* prev_;
+ // A strex can fail due to background cache evictions. Rather than
+ // simulating this, we'll just occasionally introduce cases where an
+ // exclusive store fails. This will happen once after every
+ // kMaxFailureCounter exclusive stores.
+ static const int kMaxFailureCounter = 5;
+ int failure_counter_;
+ };
+
+ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+ base::Mutex mutex;
+
+ void NotifyLoadExcl_Locked(int32_t addr, Processor* processor);
+ void NotifyStore_Locked(int32_t addr, Processor* processor);
+ bool NotifyStoreExcl_Locked(int32_t addr, Processor* processor);
+
+ // Called when the simulator is destroyed.
+ void RemoveProcessor(Processor* processor);
+
+ private:
+ bool IsProcessorInLinkedList_Locked(Processor* processor) const;
+ void PrependProcessor_Locked(Processor* processor);
+
+ Processor* head_;
+ };
+
+ LocalMonitor local_monitor_;
+ GlobalMonitor::Processor global_monitor_processor_;
+ static base::LazyInstance<GlobalMonitor>::type global_monitor_;
};
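
The monitor state machines themselves are small. As an illustration of the store-exclusive half, here is a sketch of LocalMonitor::NotifyStoreExcl consistent with the declarations above (the shipped body lives in simulator-arm.cc):

bool Simulator::LocalMonitor::NotifyStoreExcl(int32_t addr,
                                              TransactionSize size) {
  if (access_state_ == MonitorAccess::Exclusive && addr == tagged_addr_ &&
      size == size_) {
    // The architecture permits requiring an exact address/width match with
    // the preceding ldrex (DDI 0406C.b, B2.10.1), so no masking here.
    Clear();
    return true;
  }
  Clear();  // Any mismatched or stray strex consumes the reservation.
  return false;
}

The global monitor, by contrast, compares addresses through kExclusiveTaggedAddrMask, i.e. at the granularity of a 2 KiB reservation granule, and uses failure_counter_ to fail one otherwise-valid exclusive store after every kMaxFailureCounter of them, approximating the spurious strex failures caused by cache evictions on real hardware.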
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index a639e3e7ac..5242387a42 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -8,7 +8,7 @@
#include "src/arm64/assembler-arm64.h"
#include "src/assembler.h"
#include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index a55f8138f2..460ac44d7a 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -198,6 +198,7 @@ struct Register : public CPURegister {
};
static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
struct FPRegister : public CPURegister {
enum Code {
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index b9ab39c22b..082565f20c 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -163,9 +163,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ Cmp(right_type, SIMD128_VALUE_TYPE);
- __ B(eq, slow);
} else if (cond == eq) {
__ JumpIfHeapNumber(right, &heap_number);
} else {
@@ -177,9 +174,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ Cmp(right_type, SIMD128_VALUE_TYPE);
- __ B(eq, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -1139,10 +1133,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Fmov(fp_zero, 0.0);
// Build an entry frame (see layout below).
- int marker = type();
+ StackFrame::Type marker = type();
int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
__ Mov(x13, bad_frame_pointer);
- __ Mov(x12, Smi::FromInt(marker));
+ __ Mov(x12, StackFrame::TypeToMarker(marker));
__ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
__ Ldr(x10, MemOperand(x11));
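
These hunks replace Smi-encoded frame markers with StackFrame::TypeToMarker values. The marker encoding itself is not shown in this diff; as an assumption for orientation only (verify against frames.h in this tree), it is believed to be the frame type shifted up one bit with the low bit set, which no Smi can be:

// Hypothetical restatement of the marker encoding; not quoted from frames.h.
int32_t TypeToMarker(StackFrame::Type type) {
  return (static_cast<int32_t>(type) << 1) | 1;  // low bit set => not a Smi
}

That also explains the relaxed DCHECK below: INNER_JSENTRY_FRAME and OUTERMOST_JSENTRY_FRAME are now compared as plain integers rather than Smis.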
@@ -1158,12 +1152,12 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ldr(x11, MemOperand(x10));
__ Cbnz(x11, &non_outermost_js);
__ Str(fp, MemOperand(x10));
- __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Mov(x12, StackFrame::OUTERMOST_JSENTRY_FRAME);
__ Push(x12);
__ B(&done);
__ Bind(&non_outermost_js);
// We spare one instruction by pushing xzr since the marker is 0.
- DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+ DCHECK(StackFrame::INNER_JSENTRY_FRAME == 0);
__ Push(xzr);
__ Bind(&done);
@@ -1245,7 +1239,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ Pop(x10);
- __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
__ B(ne, &non_outermost_js_2);
__ Mov(x11, ExternalReference(js_entry_sp));
__ Str(xzr, MemOperand(x11));
@@ -1268,56 +1262,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver = LoadDescriptor::ReceiverRegister();
- // Ensure that the vector and slot registers won't be clobbered before
- // calling the miss handler.
- DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
- LoadWithVectorDescriptor::SlotRegister()));
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
- x11, &miss);
-
- __ Bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
- // Return address is in lr.
- Label miss;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register index = LoadDescriptor::NameRegister();
- Register result = x0;
- Register scratch = x10;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
- result.is(LoadWithVectorDescriptor::SlotRegister()));
-
- // StringCharAtGenerator doesn't use the result register until it's passed
- // the different miss possibilities. If it did, we would have a conflict
- // when FLAG_vector_ics is true.
- StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- RECEIVER_IS_STRING);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
- __ Bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec);
@@ -1445,7 +1389,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
- // (8) Sliced string. Replace subject with parent. Go to (1).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label check_underlying; // (1)
Label seq_string; // (4)
@@ -1479,6 +1423,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ Cmp(string_representation, kExternalStringTag);
@@ -1506,10 +1451,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// before entering the exit frame.
__ SmiUntag(x1, x10);
- // The third bit determines the string encoding in string_type.
- STATIC_ASSERT(kOneByteStringTag == 0x04);
+ // The fourth bit determines the string encoding in string_type.
+ STATIC_ASSERT(kOneByteStringTag == 0x08);
STATIC_ASSERT(kTwoByteStringTag == 0x00);
- STATIC_ASSERT(kStringEncodingMask == 0x04);
+ STATIC_ASSERT(kStringEncodingMask == 0x08);
// Find the code object based on the assumptions above.
// kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
@@ -1517,7 +1462,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
- // We will need the encoding later: Latin1 = 0x04
+ // We will need the encoding later: Latin1 = 0x08
// UC16 = 0x00
__ Ands(string_encoding, string_type, kStringEncodingMask);
__ CzeroX(x10, ne);
@@ -1565,10 +1510,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
// Handle UC16 encoding, two bytes make one character.
- // string_encoding: if Latin1: 0x04
+ // string_encoding: if Latin1: 0x08
// if UC16: 0x00
- STATIC_ASSERT(kStringEncodingMask == 0x04);
- __ Ubfx(string_encoding, string_encoding, 2, 1);
+ STATIC_ASSERT(kStringEncodingMask == 0x08);
+ __ Ubfx(string_encoding, string_encoding, 3, 1);
__ Eor(string_encoding, string_encoding, 1);
// string_encoding: if Latin1: 0
// if UC16: 1
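
Spelled out, the Ands/Ubfx/Eor sequence above computes an index from the renumbered encoding bit:

// Equivalent scalar computation (kStringEncodingMask is now 0x08, bit 3):
int encoding_index = ((string_type & 0x08) >> 3) ^ 1;  // Latin1 -> 0, UC16 -> 1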
@@ -1781,11 +1726,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
kShortExternalStringMask | kIsNotStringMask,
&runtime);
- // (8) Sliced string. Replace subject with parent.
+ // (8) Sliced or thin string. Replace subject with parent.
+ Label thin_string;
+ __ Cmp(string_representation, kThinStringTag);
+ __ B(eq, &thin_string);
__ Ldr(sliced_string_offset,
UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
__ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ B(&check_underlying); // Go to (1).
+
+ __ bind(&thin_string);
+ __ Ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+ __ B(&check_underlying); // Go to (1).
#endif
}
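
A rough C++ restatement of what step (8) now does for the two indirect shapes (cast helpers and accessors assumed from objects.h; the emitted code above is the authority):

if ((string_type & kStringRepresentationMask) == kThinStringTag) {
  // A ThinString is a transparent forwarding wrapper left behind by
  // in-place internalization; follow it to the actual string.
  subject = ThinString::cast(subject)->actual();
} else {
  // A SlicedString contributes its start offset and defers to its parent.
  sliced_string_offset = SlicedString::cast(subject)->offset();
  subject = SlicedString::cast(subject)->parent();
}
// Either way, loop back to (1) and re-examine the new subject.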
@@ -1963,212 +1915,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
- Register slot) {
- __ Add(feedback_vector, feedback_vector,
- Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Add(feedback_vector, feedback_vector,
- Operand(FixedArray::kHeaderSize + kPointerSize));
- __ Ldr(slot, FieldMemOperand(feedback_vector, 0));
- __ Add(slot, slot, Operand(Smi::FromInt(1)));
- __ Str(slot, FieldMemOperand(feedback_vector, 0));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
- // x0 - number of arguments
- // x1 - function
- // x3 - slot id
- // x2 - vector
- // x4 - allocation site (loaded from vector[slot])
- Register function = x1;
- Register feedback_vector = x2;
- Register index = x3;
- Register allocation_site = x4;
- Register scratch = x5;
-
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
- __ Cmp(function, scratch);
- __ B(ne, miss);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, feedback_vector, index);
-
- // Set up arguments for the array constructor stub.
- Register allocation_site_arg = feedback_vector;
- Register new_target_arg = index;
- __ Mov(allocation_site_arg, allocation_site);
- __ Mov(new_target_arg, function);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("CallICStub");
-
- // x0 - number of arguments
- // x1 - function
- // x3 - slot id (Smi)
- // x2 - vector
- Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
- Register function = x1;
- Register feedback_vector = x2;
- Register index = x3;
-
- // The checks. First, does x1 match the recorded monomorphic target?
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
-
- // We don't know that we have a weak cell. We might have a private symbol
- // or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
- // computed, meaning that it can't appear to be a pointer. If the low bit is
- // 0, then hash is computed, but the 0 bit prevents the field from appearing
- // to be a pointer.
- STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
- WeakCell::kValueOffset &&
- WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
- __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
- __ Cmp(x5, function);
- __ B(ne, &extra_checks_or_miss);
-
- // The compare above could have been a SMI/SMI comparison. Guard against this
- // convincing us that we have a monomorphic JSFunction.
- __ JumpIfSmi(function, &extra_checks_or_miss);
-
- __ Bind(&call_function);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, feedback_vector, index);
-
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
- tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&extra_checks_or_miss);
- Label uninitialized, miss, not_allocation_site;
-
- __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &call);
-
- __ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &not_allocation_site);
-
- HandleArrayCase(masm, &miss);
-
- __ bind(&not_allocation_site);
-
- // The following cases attempt to handle MISS cases without going to the
- // runtime.
- if (FLAG_trace_ic) {
- __ jmp(&miss);
- }
-
- // TODO(mvstanton): the code below is effectively disabled. Investigate.
- __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
-
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(x4);
- __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
- __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
-
- __ Bind(&call);
-
- // Increment the call count for megamorphic function calls.
- IncrementCallCount(masm, feedback_vector, index);
-
- __ Bind(&call_count_incremented);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&uninitialized);
-
- // We are going monomorphic, provided we actually have a JSFunction.
- __ JumpIfSmi(function, &miss);
-
- // Goto miss case if we do not have a function.
- __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
-
- // Make sure the function is not the Array() function, which requires special
- // behavior on MISS.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
- __ Cmp(function, x5);
- __ B(eq, &miss);
-
- // Make sure the function belongs to the same native context.
- __ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
- __ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
- __ Ldr(x5, NativeContextMemOperand());
- __ Cmp(x4, x5);
- __ B(ne, &miss);
-
- // Store the function. Use a stub since we need a frame for allocation.
- // x2 - vector
- // x3 - slot
- // x1 - function
- // x0 - number of arguments
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateWeakCellStub create_stub(masm->isolate());
- __ SmiTag(x0);
- __ Push(x0);
- __ Push(feedback_vector, index);
-
- __ Push(cp, function);
- __ CallStub(&create_stub);
- __ Pop(cp, function);
-
- __ Pop(feedback_vector, index);
- __ Pop(x0);
- __ SmiUntag(x0);
- }
-
- __ B(&call_function);
-
- // We are here because tracing is on or we encountered a MISS case we can't
- // handle here.
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The runtime increments the call count in the vector for us.
- __ B(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
- ASM_LOCATION("CallICStub[Miss]");
-
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the number of arguments as Smi.
- __ SmiTag(x0);
-
- // Push the receiver and the function and feedback info.
- __ Push(x0, x1, x2, x3);
-
- // Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss);
-
- // Move result to edi and exit the internal frame.
- __ Mov(x1, x0);
-
- // Restore number of arguments.
- __ Pop(x0);
- __ SmiUntag(x0);
-}
-
-
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
if (check_mode_ == RECEIVER_IS_UNKNOWN) {
@@ -2254,38 +2000,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- __ JumpIfNotSmi(code_, &slow_case_);
- __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
- __ B(hi, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged one-byte char code.
- __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
- __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
- __ Bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ Bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ Push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode);
- __ Mov(result_, x0);
- call_helper.AfterCall(masm);
- __ B(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
// Inputs are in x0 (lhs) and x1 (rhs).
DCHECK_EQ(CompareICState::BOOLEAN, state());
@@ -2966,12 +2680,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadFeedbackVector(x2);
- CallICStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@@ -3554,581 +3262,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(x1);
-
- // Make x2 point to the JavaScript frame.
- __ Mov(x2, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
- __ Cmp(x3, x1);
- __ B(eq, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ Bind(&ok);
- }
-
- // Check if we have rest parameters (only possible if we have an
- // arguments adaptor frame below the function frame).
- Label no_rest_parameters;
- __ Ldr(x2, MemOperand(x2, CommonFrameConstants::kCallerFPOffset));
- __ Ldr(x3, MemOperand(x2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &no_rest_parameters);
-
- // Check if the arguments adaptor frame contains more arguments than
- // specified by the function's internal formal parameter count.
- Label rest_parameters;
- __ Ldrsw(x0, UntagSmiMemOperand(
- x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(
- x3, FieldMemOperand(x3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Subs(x0, x0, x3);
- __ B(gt, &rest_parameters);
-
- // Return an empty rest parameter array.
- __ Bind(&no_rest_parameters);
- {
- // ----------- S t a t e -------------
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Allocate an empty rest parameter array.
- Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, NO_ALLOCATION_FLAGS);
- __ Bind(&done_allocate);
-
- // Setup the rest parameter array in x0.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
- __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
- __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
- __ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
- __ Mov(x1, Smi::kZero);
- __ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace.
- __ Bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(Smi::FromInt(JSArray::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- }
- __ B(&done_allocate);
- }
-
- __ Bind(&rest_parameters);
- {
- // Compute the pointer to the first rest parameter (skippping the receiver).
- __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
- __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- x0 : number of rest parameters
- // -- x1 : function
- // -- x2 : pointer to first rest parameters
- // -- lr : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ Mov(x6, JSArray::kSize + FixedArray::kHeaderSize);
- __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
- __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
- __ Bind(&done_allocate);
-
- // Compute arguments.length in x6.
- __ SmiTag(x6, x0);
-
- // Setup the elements array in x3.
- __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
- __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
- __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
- __ Add(x4, x3, FixedArray::kHeaderSize);
- {
- Label loop, done_loop;
- __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
- __ Bind(&loop);
- __ Cmp(x4, x0);
- __ B(eq, &done_loop);
- __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
- __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
- __ Sub(x2, x2, Operand(1 * kPointerSize));
- __ Add(x4, x4, Operand(1 * kPointerSize));
- __ B(&loop);
- __ Bind(&done_loop);
- }
-
- // Setup the rest parameter array in x0.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
- __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
- __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
- __ Str(x3, FieldMemOperand(x0, JSArray::kElementsOffset));
- __ Str(x6, FieldMemOperand(x0, JSArray::kLengthOffset));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ Bind(&allocate);
- __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
- __ B(gt, &too_big_for_new_space);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(x0);
- __ SmiTag(x6);
- __ Push(x0, x2, x6);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Mov(x3, x0);
- __ Pop(x2, x0);
- __ SmiUntag(x0);
- }
- __ B(&done_allocate);
-
- // Fall back to %NewRestParameter.
- __ Bind(&too_big_for_new_space);
- __ Push(x1);
- __ TailCallRuntime(Runtime::kNewRestParameter);
- }
-}
-
-
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(x1);
-
- // Make x6 point to the JavaScript frame.
- __ Mov(x6, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ Ldr(x6, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ Ldr(x3, MemOperand(x6, StandardFrameConstants::kFunctionOffset));
- __ Cmp(x3, x1);
- __ B(eq, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ Bind(&ok);
- }
-
- // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(
- x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Add(x3, x6, Operand(x2, LSL, kPointerSizeLog2));
- __ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ SmiTag(x2);
-
- // x1 : function
- // x2 : number of parameters (tagged)
- // x3 : parameters pointer
- // x6 : JavaScript frame pointer
- //
- // Returns pointer to result object in x0.
-
- // Make an untagged copy of the parameter count.
- // Note: arg_count_smi is an alias of param_count_smi.
- Register function = x1;
- Register arg_count_smi = x2;
- Register param_count_smi = x2;
- Register recv_arg = x3;
- Register param_count = x7;
- __ SmiUntag(param_count, param_count_smi);
-
- // Check if the calling frame is an arguments adaptor frame.
- Register caller_fp = x11;
- Register caller_ctx = x12;
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ Ldr(caller_fp, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(
- caller_ctx,
- MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
-
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped params, min(params, args) (uninit)
- // x7 param_count number of function parameters
- // x11 caller_fp caller's frame pointer
- // x14 arg_count number of function arguments (uninit)
-
- Register arg_count = x14;
- Register mapped_params = x4;
- __ Mov(arg_count, param_count);
- __ Mov(mapped_params, param_count);
- __ B(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ Bind(&adaptor_frame);
- __ Ldr(arg_count_smi,
- MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(arg_count, arg_count_smi);
- __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
- __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
-
- // Compute the mapped parameter count = min(param_count, arg_count)
- __ Cmp(param_count, arg_count);
- __ Csel(mapped_params, param_count, arg_count, lt);
-
- __ Bind(&try_allocate);
-
- // x0 alloc_obj pointer to allocated objects: param map, backing
- // store, arguments (uninit)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x7 param_count number of function parameters
- // x10 size size of objects to allocate (uninit)
- // x14 arg_count number of function arguments
-
- // Compute the size of backing store, parameter map, and arguments object.
- // 1. Parameter map, has two extra words containing context and backing
- // store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
-
- // Calculate the parameter map size, assuming it exists.
- Register size = x10;
- __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
- __ Add(size, size, kParameterMapHeaderSize);
-
- // If there are no mapped parameters, set the running size total to zero.
- // Otherwise, use the parameter map size calculated earlier.
- __ Cmp(mapped_params, 0);
- __ CzeroX(size, eq);
-
- // 2. Add the size of the backing store and arguments object.
- __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
- __ Add(size, size, FixedArray::kHeaderSize + JSSloppyArgumentsObject::kSize);
-
- // Do the allocation of all three objects in one go. Assign this to x0, as it
- // will be returned to the caller.
- Register alloc_obj = x0;
- __ Allocate(size, alloc_obj, x11, x12, &runtime, NO_ALLOCATION_FLAGS);
-
- // Get the arguments boilerplate from the current (global) context.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x7 param_count number of function parameters
- // x11 sloppy_args_map offset to args (or aliased args) map (uninit)
- // x14 arg_count number of function arguments
-
- Register global_ctx = x10;
- Register sloppy_args_map = x11;
- Register aliased_args_map = x10;
- __ Ldr(global_ctx, NativeContextMemOperand());
-
- __ Ldr(sloppy_args_map,
- ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Ldr(
- aliased_args_map,
- ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
- __ Cmp(mapped_params, 0);
- __ CmovX(sloppy_args_map, aliased_args_map, ne);
-
- // Copy the JS object part.
- __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
- __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
- __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- __ AssertNotSmi(function);
- __ Str(function,
- FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kCalleeOffset));
-
- // Use the length and set that as an in-object property.
- __ Str(arg_count_smi,
- FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, "elements" will point there, otherwise
- // it will point to the backing store.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x5 elements pointer to parameter map or backing store (uninit)
- // x6 backing_store pointer to backing store (uninit)
- // x7 param_count number of function parameters
- // x14 arg_count number of function arguments
-
- Register elements = x5;
- __ Add(elements, alloc_obj, JSSloppyArgumentsObject::kSize);
- __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ Cmp(mapped_params, 0);
- // Set up backing store address, because it is needed later for filling in
- // the unmapped arguments.
- Register backing_store = x6;
- __ CmovX(backing_store, elements, eq);
- __ B(eq, &skip_parameter_map);
-
- __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
- __ Add(x10, mapped_params, 2);
- __ SmiTag(x10);
- __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Str(cp, FieldMemOperand(elements,
- FixedArray::kHeaderSize + 0 * kPointerSize));
- __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
- __ Add(x10, x10, kParameterMapHeaderSize);
- __ Str(x10, FieldMemOperand(elements,
- FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. Then index the context,
- // where parameters are stored in reverse order, at:
- //
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
- //
- // The mapped parameter thus needs to get indices:
- //
- // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
- // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
- //
- // We loop from right to left.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x5 elements pointer to parameter map or backing store (uninit)
- // x6 backing_store pointer to backing store (uninit)
- // x7 param_count number of function parameters
- // x11 loop_count parameter loop counter (uninit)
- // x12 index parameter index (smi, uninit)
- // x13 the_hole hole value (uninit)
- // x14 arg_count number of function arguments
-
- Register loop_count = x11;
- Register index = x12;
- Register the_hole = x13;
- Label parameters_loop, parameters_test;
- __ Mov(loop_count, mapped_params);
- __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
- __ Sub(index, index, mapped_params);
- __ SmiTag(index);
- __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
- __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
- __ Add(backing_store, backing_store, kParameterMapHeaderSize);
-
- __ B(&parameters_test);
-
- __ Bind(&parameters_loop);
- __ Sub(loop_count, loop_count, 1);
- __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
- __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
- __ Str(index, MemOperand(elements, x10));
- __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
- __ Str(the_hole, MemOperand(backing_store, x10));
- __ Add(index, index, Smi::FromInt(1));
- __ Bind(&parameters_test);
- __ Cbnz(loop_count, &parameters_loop);
-
- __ Bind(&skip_parameter_map);
- // Copy arguments header and remaining slots (if there are any.)
- __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
- __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
- __ Str(arg_count_smi, FieldMemOperand(backing_store,
- FixedArray::kLengthOffset));
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x6 backing_store pointer to backing store (uninit)
- // x14 arg_count number of function arguments
-
- Label arguments_loop, arguments_test;
- __ Mov(x10, mapped_params);
- __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
- __ B(&arguments_test);
-
- __ Bind(&arguments_loop);
- __ Sub(recv_arg, recv_arg, kPointerSize);
- __ Ldr(x11, MemOperand(recv_arg));
- __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
- __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
- __ Add(x10, x10, 1);
-
- __ Bind(&arguments_test);
- __ Cmp(x10, arg_count);
- __ B(lt, &arguments_loop);
-
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ Bind(&runtime);
- __ Push(function, recv_arg, arg_count_smi);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(x1);
-
- // Make x2 point to the JavaScript frame.
- __ Mov(x2, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
- __ Cmp(x3, x1);
- __ B(eq, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ Bind(&ok);
- }
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &arguments_adaptor);
- {
- __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(x0, FieldMemOperand(
- x4, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
- __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
- }
- __ B(&arguments_done);
- __ Bind(&arguments_adaptor);
- {
- __ Ldrsw(x0, UntagSmiMemOperand(
- x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Add(x2, x3, Operand(x0, LSL, kPointerSizeLog2));
- __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
- }
- __ Bind(&arguments_done);
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- x0 : number of rest parameters
- // -- x1 : function
- // -- x2 : pointer to first rest parameters
- // -- lr : return address
- // -----------------------------------
-
- // Allocate space for the strict arguments object plus the backing store.
- Label allocate, done_allocate;
- __ Mov(x6, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
- __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
- __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
- __ Bind(&done_allocate);
-
- // Compute arguments.length in x6.
- __ SmiTag(x6, x0);
-
- // Setup the elements array in x3.
- __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
- __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
- __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
- __ Add(x4, x3, FixedArray::kHeaderSize);
- {
- Label loop, done_loop;
- __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
- __ Bind(&loop);
- __ Cmp(x4, x0);
- __ B(eq, &done_loop);
- __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
- __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
- __ Sub(x2, x2, Operand(1 * kPointerSize));
- __ Add(x4, x4, Operand(1 * kPointerSize));
- __ B(&loop);
- __ Bind(&done_loop);
- }
-
- // Setup the strict arguments object in x0.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, x1);
- __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kMapOffset));
- __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kPropertiesOffset));
- __ Str(x3, FieldMemOperand(x0, JSStrictArgumentsObject::kElementsOffset));
- __ Str(x6, FieldMemOperand(x0, JSStrictArgumentsObject::kLengthOffset));
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ Bind(&allocate);
- __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
- __ B(gt, &too_big_for_new_space);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(x0);
- __ SmiTag(x6);
- __ Push(x0, x2, x6);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Mov(x3, x0);
- __ Pop(x2, x0);
- __ SmiUntag(x0);
- }
- __ B(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ Bind(&too_big_for_new_space);
- __ Push(x1);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
// The number of registers that CallApiFunctionAndReturn will need to save on
// the stack. The space for these registers needs to be allocated in the
// ExitFrame before calling CallApiFunctionAndReturn.
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index e6ddcfadb8..4fb9a2d939 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -99,6 +99,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register result,
Label* call_runtime) {
DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
+ Label indirect_string_loaded;
+ __ Bind(&indirect_string_loaded);
+
// Fetch the instance type of the receiver into result register.
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -108,17 +111,25 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
// Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
+ Label cons_string, thin_string;
+ __ And(result, result, kStringRepresentationMask);
+ __ Cmp(result, kConsStringTag);
+ __ B(eq, &cons_string);
+ __ Cmp(result, kThinStringTag);
+ __ B(eq, &thin_string);
// Handle slices.
- Label indirect_string_loaded;
__ Ldr(result.W(),
UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
__ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ Add(index, index, result.W());
__ B(&indirect_string_loaded);
+ // Handle thin strings.
+ __ Bind(&thin_string);
+ __ Ldr(string, FieldMemOperand(string, ThinString::kActualOffset));
+ __ B(&indirect_string_loaded);
+
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
@@ -129,10 +140,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
// Get the first of the two strings and load its instance type.
__ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ Bind(&indirect_string_loaded);
- __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ B(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
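
The restructured generator is effectively an unwrapping loop: every indirect case branches back to indirect_string_loaded, which reloads the map and instance type and dispatches again. The same logic in sketch form (accessor names assumed; non-flat cons strings still bail to the runtime as above):

while (true) {
  InstanceType type = string->map()->instance_type();
  if ((type & kIsIndirectStringMask) == 0) break;  // sequential or external
  switch (type & kStringRepresentationMask) {
    case kSlicedStringTag:
      index += SlicedString::cast(string)->offset();
      string = SlicedString::cast(string)->parent();
      break;
    case kThinStringTag:
      string = ThinString::cast(string)->actual();
      break;
    case kConsStringTag:  // only flat cons strings get here
      string = ConsString::cast(string)->first();
      break;
  }
}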
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index c1d04ac3fb..0bedceb6ed 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -78,7 +78,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
+ Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index b0a80c636f..988f7e935d 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -71,30 +71,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: function
- Register registers[] = {x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: function
- Register registers[] = {x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: function
- Register registers[] = {x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
@@ -163,15 +139,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {x1, x3};
+ Register registers[] = {x1, x0, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x0, x3, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -200,6 +174,13 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x1: target
+ // x2: start index (to support rest parameters)
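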
+ Register registers[] = {x1, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -236,13 +217,12 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Descriptor::InitializePlatformSpecific( \
- CallInterfaceDescriptorData* data) { \
- data->InitializePlatformSpecific(0, nullptr, nullptr); \
- }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+ Register registers[] = {x1, x3, x0, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -461,6 +441,14 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ x1, // loaded new FP
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index c02a4dd234..549db5d048 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1780,23 +1780,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- DCHECK(!AreAliased(string, length, scratch1, scratch2));
- LoadRoot(scratch2, map_index);
- SmiTag(scratch1, length);
- Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
-
- Mov(scratch2, String::kEmptyHashField);
- Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
- Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
// Running on the real platform. Use the alignment as mandated by the local
@@ -2618,7 +2601,7 @@ void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
UseScratchRegisterScope temps(this);
frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
Register temp = temps.AcquireX();
- Mov(temp, Smi::FromInt(type));
+ Mov(temp, StackFrame::TypeToMarker(type));
Push(lr, fp);
Mov(fp, StackPointer());
Claim(frame_slots);
@@ -2636,8 +2619,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- Ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
- Ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+ Ldr(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+ Ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
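
The feedback vector no longer lives in a literals array; it hangs off the function through a cell. The two loads above amount to:

// vector = JSFunction --kFeedbackVectorOffset--> Cell --kValueOffset--> FeedbackVector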
@@ -2655,7 +2638,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
if (type == StackFrame::INTERNAL) {
DCHECK(jssp.Is(StackPointer()));
- Mov(type_reg, Smi::FromInt(type));
+ Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
Push(type_reg);
Mov(code_reg, Operand(CodeObject()));
@@ -2667,17 +2650,17 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
// jssp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
- Mov(type_reg, Smi::FromInt(type));
- Push(xzr, lr);
- Push(fp, type_reg);
- Add(fp, csp, TypedFrameConstants::kFixedFrameSizeFromFp);
- // csp[3] for alignment
- // csp[2] : lr
- // csp[1] : fp
- // csp[0] : type
+ Mov(type_reg, StackFrame::TypeToMarker(type));
+ Push(lr, fp);
+ Mov(fp, csp);
+ Push(type_reg, xzr);
+ // csp[3] : lr
+ // csp[2] : fp
+ // csp[1] : type
+ // csp[0] : for alignment
} else {
DCHECK(jssp.Is(StackPointer()));
- Mov(type_reg, Smi::FromInt(type));
+ Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
Push(type_reg);
Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
@@ -2689,12 +2672,19 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- DCHECK(jssp.Is(StackPointer()));
- // Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer and return address.
- Mov(jssp, fp);
- AssertStackConsistency();
- Pop(fp, lr);
+ if (type == StackFrame::WASM_COMPILED) {
+ DCHECK(csp.Is(StackPointer()));
+ Mov(csp, fp);
+ AssertStackConsistency();
+ Pop(fp, lr);
+ } else {
+ DCHECK(jssp.Is(StackPointer()));
+ // Drop the execution stack down to the frame pointer and restore
+ // the caller frame pointer and return address.
+ Mov(jssp, fp);
+ AssertStackConsistency();
+ Pop(fp, lr);
+ }
}
@@ -2741,7 +2731,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Set up the new stack frame.
Push(lr, fp);
Mov(fp, StackPointer());
- Mov(scratch, Smi::FromInt(frame_type));
+ Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch);
Push(xzr);
Mov(scratch, Operand(CodeObject()));
@@ -2888,16 +2878,17 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
-
-void MacroAssembler::DebugBreak() {
- Mov(x0, 0);
- Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
- CEntryStub ces(isolate(), 1);
- DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+ // Check whether we need to drop frames to restart a function on the stack.
+ ExternalReference restart_fp =
+ ExternalReference::debug_restart_fp_address(isolate());
+ Mov(x1, Operand(restart_fp));
+ Ldr(x1, MemOperand(x1));
+ Tst(x1, x1);
+ Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+ ne);
}
-
void MacroAssembler::PushStackHandler() {
DCHECK(jssp.Is(StackPointer()));
// Adjust this code if the asserts don't hold.
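
MaybeDropFrames replaces DebugBreak on the debug-break path: rather than calling into the runtime unconditionally, it tests a per-isolate slot and only jumps out when the debugger has requested a restart. In C-like terms (illustrative):

// Address restart_fp = *debug_restart_fp_address(isolate);
// if (restart_fp != 0) TailCall(FrameDropperTrampoline);  // drop to that frame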
@@ -3407,32 +3398,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
Bind(&done);
}
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss) {
- DCHECK(!AreAliased(function, result, scratch));
-
- // Get the prototype or initial map from the function.
- Ldr(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and simply
- // miss the cache instead. This will allow us to allocate a prototype object
- // on-demand in the runtime system.
- JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
-
- // Get the prototype from the initial map.
- Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- Bind(&done);
-}
-
-
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
@@ -4645,9 +4610,8 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
}
}
-
InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
- : reg_(NoReg), smi_check_(NULL) {
+ : reg_(NoReg), smi_check_delta_(0), smi_check_(NULL) {
InstructionSequence* inline_data = InstructionSequence::At(info);
DCHECK(inline_data->IsInlineData());
if (inline_data->IsInlineData()) {
@@ -4659,9 +4623,9 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
uint32_t payload32 = static_cast<uint32_t>(payload);
int reg_code = RegisterBits::decode(payload32);
reg_ = Register::XRegFromCode(reg_code);
- int smi_check_delta = DeltaBits::decode(payload32);
- DCHECK(smi_check_delta != 0);
- smi_check_ = inline_data->preceding(smi_check_delta);
+ smi_check_delta_ = DeltaBits::decode(payload32);
+ DCHECK_NE(0, smi_check_delta_);
+ smi_check_ = inline_data->preceding(smi_check_delta_);
}
}
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index b3308d349e..560a824c04 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -1300,12 +1300,9 @@ class MacroAssembler : public Assembler {
MacroAssembler* masm_;
};
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
+ // Frame restart support
+ void MaybeDropFrames();
- // ---------------------------------------------------------------------------
// Exception handling
// Push a new stack handler and link into stack handler chain.
@@ -1371,9 +1368,6 @@ class MacroAssembler : public Assembler {
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
- void TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss);
-
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -2002,12 +1996,6 @@ class MacroAssembler : public Assembler {
CPURegList tmp_list_;
CPURegList fptmp_list_;
- void InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2);
-
public:
// Far branches resolving.
//
@@ -2157,6 +2145,8 @@ class InlineSmiCheckInfo {
return smi_check_;
}
+ int SmiCheckDelta() const { return smi_check_delta_; }
+
// Use MacroAssembler::InlineData to emit information about patchable inline
// SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
// indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
@@ -2174,6 +2164,7 @@ class InlineSmiCheckInfo {
private:
Register reg_;
+ int smi_check_delta_;
Instruction* smi_check_;
// Fields in the data encoded by InlineData.
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index b4026b0b19..95d1e8a64f 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -9,11 +9,13 @@
#include "src/asmjs/asm-typer.h"
#include "src/asmjs/asm-wasm-builder.h"
#include "src/assert-scope.h"
+#include "src/base/platform/elapsed-timer.h"
#include "src/compilation-info.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/handles.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/parsing/parse-info.h"
@@ -186,11 +188,14 @@ MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
base::ElapsedTimer compile_timer;
compile_timer.Start();
- MaybeHandle<JSObject> compiled = wasm::CreateModuleObjectFromBytes(
- info->isolate(), module->begin(), module->end(), &thrower,
- internal::wasm::kAsmJsOrigin, info->script(), asm_offsets_vec);
+ MaybeHandle<JSObject> compiled = SyncCompileTranslatedAsmJs(
+ info->isolate(), &thrower,
+ wasm::ModuleWireBytes(module->begin(), module->end()), info->script(),
+ asm_offsets_vec);
DCHECK(!compiled.is_null());
double compile_time = compile_timer.Elapsed().InMillisecondsF();
+ DCHECK_GE(module->end(), module->begin());
+ uintptr_t wasm_size = module->end() - module->begin();
wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
Handle<FixedArray> uses_array =
@@ -216,10 +221,10 @@ MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
if (FLAG_predictable) {
length = base::OS::SNPrintF(text, arraysize(text), "success");
} else {
- length =
- base::OS::SNPrintF(text, arraysize(text),
- "success, asm->wasm: %0.3f ms, compile: %0.3f ms",
- asm_wasm_time, compile_time);
+ length = base::OS::SNPrintF(
+ text, arraysize(text),
+ "success, asm->wasm: %0.3f ms, compile: %0.3f ms, %" PRIuPTR " bytes",
+ asm_wasm_time, compile_time, wasm_size);
}
DCHECK_NE(-1, length);
USE(length);
@@ -271,22 +276,18 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
foreign, NONE);
}
- i::MaybeHandle<i::JSObject> maybe_module_object =
- i::wasm::WasmModule::Instantiate(isolate, &thrower, module, ffi_object,
- memory);
+ i::MaybeHandle<i::Object> maybe_module_object =
+ i::wasm::SyncInstantiate(isolate, &thrower, module, ffi_object, memory);
if (maybe_module_object.is_null()) {
return MaybeHandle<Object>();
}
+ i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
i::Handle<i::Name> init_name(isolate->factory()->InternalizeUtf8String(
wasm::AsmWasmBuilder::foreign_init_name));
+ i::Handle<i::Object> init =
+ i::Object::GetProperty(module_object, init_name).ToHandleChecked();
- i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
- i::MaybeHandle<i::Object> maybe_init =
- i::Object::GetProperty(module_object, init_name);
- DCHECK(!maybe_init.is_null());
-
- i::Handle<i::Object> init = maybe_init.ToHandleChecked();
i::Handle<i::Object> undefined(isolate->heap()->undefined_value(), isolate);
i::Handle<i::Object>* foreign_args_array =
new i::Handle<i::Object>[foreign_globals->length()];
@@ -345,7 +346,9 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
MessageHandler::ReportMessage(isolate, &location, message);
}
- return module_object;
+ Handle<String> exports_name =
+ isolate->factory()->InternalizeUtf8String("exports");
+ return i::Object::GetProperty(module_object, exports_name);
}
} // namespace internal
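
Note on the size reporting added above: the success message now includes the translated module's byte count, computed as a pointer difference and printed with the PRIuPTR conversion macro. A minimal standalone sketch of that arithmetic and formatting, using a toy byte buffer rather than V8's module types:

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint8_t> module(1024);  // stand-in for the translated wasm bytes
  const uint8_t* begin = module.data();
  const uint8_t* end = begin + module.size();
  // Non-negative by construction, which is what the DCHECK_GE asserts.
  uintptr_t wasm_size = static_cast<uintptr_t>(end - begin);
  char text[64];
  // PRIuPTR picks the right conversion for uintptr_t across platforms.
  snprintf(text, sizeof(text), "compiled, %" PRIuPTR " bytes", wasm_size);
  puts(text);
  return 0;
}
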
diff --git a/deps/v8/src/asmjs/asm-typer.cc b/deps/v8/src/asmjs/asm-typer.cc
index 2389551872..1d18360db1 100644
--- a/deps/v8/src/asmjs/asm-typer.cc
+++ b/deps/v8/src/asmjs/asm-typer.cc
@@ -19,6 +19,7 @@
#include "src/codegen.h"
#include "src/globals.h"
#include "src/messages.h"
+#include "src/objects-inl.h"
#include "src/utils.h"
#include "src/vector.h"
@@ -385,6 +386,10 @@ AsmTyper::VariableInfo* AsmTyper::ImportLookup(Property* import) {
return obj_info;
}
+ if (!key->IsPropertyName()) {
+ return nullptr;
+ }
+
std::unique_ptr<char[]> aname = key->AsPropertyName()->ToCString();
ObjectTypeMap::iterator i = stdlib->find(std::string(aname.get()));
if (i == stdlib->end()) {
@@ -569,6 +574,8 @@ bool AsmTyper::ValidateAfterFunctionsPhase() {
void AsmTyper::ClearFunctionNodeTypes() { function_node_types_.clear(); }
+AsmType* AsmTyper::TriggerParsingError() { FAIL(root_, "Parsing error"); }
+
namespace {
bool IsUseAsmDirective(Statement* first_statement) {
ExpressionStatement* use_asm = first_statement->AsExpressionStatement();
@@ -1219,10 +1226,12 @@ AsmType* AsmTyper::ValidateFunction(FunctionDeclaration* fun_decl) {
if (as_block != nullptr) {
statements = as_block->statements();
} else {
- // We don't check whether AsReturnStatement() below returns non-null --
- // we leave that to the ReturnTypeAnnotations method.
- RECURSE(return_type_ =
- ReturnTypeAnnotations(last_statement->AsReturnStatement()));
+ if (auto* ret_statement = last_statement->AsReturnStatement()) {
+ RECURSE(return_type_ =
+ ReturnTypeAnnotations(ret_statement->expression()));
+ } else {
+ return_type_ = AsmType::Void();
+ }
}
}
} while (return_type_ == AsmType::None());
@@ -2741,15 +2750,8 @@ AsmType* AsmTyper::ParameterTypeAnnotations(Variable* parameter,
}
// 5.2 ReturnTypeAnnotations
-AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
- if (statement == nullptr) {
- return AsmType::Void();
- }
-
- auto* ret_expr = statement->expression();
- if (ret_expr == nullptr) {
- return AsmType::Void();
- }
+AsmType* AsmTyper::ReturnTypeAnnotations(Expression* ret_expr) {
+ DCHECK_NOT_NULL(ret_expr);
if (auto* binop = ret_expr->AsBinaryOperation()) {
if (IsDoubleAnnotation(binop)) {
@@ -2757,14 +2759,14 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
} else if (IsIntAnnotation(binop)) {
return AsmType::Signed();
}
- FAIL(statement, "Invalid return type annotation.");
+ FAIL(ret_expr, "Invalid return type annotation.");
}
if (auto* call = ret_expr->AsCall()) {
if (IsCallToFround(call)) {
return AsmType::Float();
}
- FAIL(statement, "Invalid function call in return statement.");
+ FAIL(ret_expr, "Invalid function call in return statement.");
}
if (auto* literal = ret_expr->AsLiteral()) {
@@ -2783,28 +2785,46 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
// return undefined
return AsmType::Void();
}
- FAIL(statement, "Invalid literal in return statement.");
+ FAIL(ret_expr, "Invalid literal in return statement.");
}
if (auto* proxy = ret_expr->AsVariableProxy()) {
auto* var_info = Lookup(proxy->var());
if (var_info == nullptr) {
- FAIL(statement, "Undeclared identifier in return statement.");
+ FAIL(ret_expr, "Undeclared identifier in return statement.");
}
if (var_info->mutability() != VariableInfo::kConstGlobal) {
- FAIL(statement, "Identifier in return statement is not const.");
+ FAIL(ret_expr, "Identifier in return statement is not const.");
}
if (!var_info->type()->IsReturnType()) {
- FAIL(statement, "Constant in return must be signed, float, or double.");
+ FAIL(ret_expr, "Constant in return must be signed, float, or double.");
}
return var_info->type();
}
- FAIL(statement, "Invalid return type expression.");
+ // NOTE: This is not strictly valid asm.js, but is emitted by some versions of
+ // Emscripten.
+ if (auto* cond = ret_expr->AsConditional()) {
+ AsmType* a = AsmType::None();
+ AsmType* b = AsmType::None();
+ RECURSE(a = ReturnTypeAnnotations(cond->then_expression()));
+ if (a->IsA(AsmType::None())) {
+ return a;
+ }
+ RECURSE(b = ReturnTypeAnnotations(cond->else_expression()));
+ if (b->IsA(AsmType::None())) {
+ return b;
+ }
+ if (a->IsExactly(b)) {
+ return a;
+ }
+ }
+
+ FAIL(ret_expr, "Invalid return type expression.");
}
// 5.4 VariableTypeAnnotations
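
The new Conditional branch above accepts `cond ? x : y` in return position only when both arms independently validate and yield exactly the same return type. A minimal standalone sketch of that agreement rule over a toy AST (toy enum and struct, not V8's AsmType machinery; the real code FAILs instead of returning None at the end):

#include <cstdio>

enum class RetType { None, Signed, Double, Float, Void };

// Toy AST node; the real visitor walks Conditional/BinaryOperation/etc.
struct Expr {
  bool is_conditional = false;
  const Expr* then_expr = nullptr;
  const Expr* else_expr = nullptr;
  RetType leaf_type = RetType::None;  // type of a non-conditional leaf
};

RetType ReturnType(const Expr* e) {
  if (!e->is_conditional) return e->leaf_type;
  RetType a = ReturnType(e->then_expr);
  if (a == RetType::None) return a;     // propagate failure from either arm
  RetType b = ReturnType(e->else_expr);
  if (b == RetType::None) return b;
  return (a == b) ? a : RetType::None;  // arms must agree exactly
}

int main() {
  Expr s; s.leaf_type = RetType::Signed;
  Expr d; d.leaf_type = RetType::Double;
  Expr mixed; mixed.is_conditional = true;
  mixed.then_expr = &s; mixed.else_expr = &d;
  std::printf("%s\n", ReturnType(&mixed) == RetType::None ? "reject" : "accept");
  return 0;
}
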
diff --git a/deps/v8/src/asmjs/asm-typer.h b/deps/v8/src/asmjs/asm-typer.h
index 8ddcb34b0f..965137383e 100644
--- a/deps/v8/src/asmjs/asm-typer.h
+++ b/deps/v8/src/asmjs/asm-typer.h
@@ -82,6 +82,8 @@ class AsmTyper final {
Handle<JSMessageObject> error_message() const { return error_message_; }
const MessageLocation* message_location() const { return &message_location_; }
+ AsmType* TriggerParsingError();
+
AsmType* TypeOf(AstNode* node) const;
AsmType* TypeOf(Variable* v) const;
StandardMember VariableAsStandardMember(Variable* var);
@@ -362,7 +364,7 @@ class AsmTyper final {
AsmType* ParameterTypeAnnotations(Variable* parameter,
Expression* annotation);
// 5.2 ReturnTypeAnnotations
- AsmType* ReturnTypeAnnotations(ReturnStatement* statement);
+ AsmType* ReturnTypeAnnotations(Expression* ret_expr);
// 5.4 VariableTypeAnnotations
// 5.5 GlobalVariableTypeAnnotations
AsmType* VariableTypeAnnotations(
diff --git a/deps/v8/src/asmjs/asm-wasm-builder.cc b/deps/v8/src/asmjs/asm-wasm-builder.cc
index 907e80fe4b..891cba3ef9 100644
--- a/deps/v8/src/asmjs/asm-wasm-builder.cc
+++ b/deps/v8/src/asmjs/asm-wasm-builder.cc
@@ -22,7 +22,9 @@
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
+#include "src/counters.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
namespace v8 {
@@ -36,6 +38,8 @@ namespace wasm {
if (HasStackOverflow()) return; \
} while (false)
+namespace {
+
enum AsmScope { kModuleScope, kInitScope, kFuncScope, kExportScope };
enum ValueFate { kDrop, kLeaveOnStack };
@@ -45,6 +49,10 @@ struct ForeignVariable {
ValueType type;
};
+enum TargetType : uint8_t { NoTarget, BreakTarget, ContinueTarget };
+
+} // namespace
+
class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public:
AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, CompilationInfo* info,
@@ -99,7 +107,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
foreign_init_function_->EmitGetLocal(static_cast<uint32_t>(pos));
ForeignVariable* fv = &foreign_variables_[pos];
uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
- foreign_init_function_->EmitWithVarInt(kExprSetGlobal, index);
+ foreign_init_function_->EmitWithVarUint(kExprSetGlobal, index);
}
foreign_init_function_->Emit(kExprEnd);
}
@@ -142,31 +150,36 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DCHECK_EQ(kModuleScope, scope_);
DCHECK_NULL(current_function_builder_);
FunctionLiteral* old_func = decl->fun();
- Zone zone(isolate_->allocator(), ZONE_NAME);
DeclarationScope* new_func_scope = nullptr;
+ std::unique_ptr<ParseInfo> info;
if (decl->fun()->body() == nullptr) {
// TODO(titzer/bradnelson): Reuse SharedFunctionInfos used here when
// compiling the wasm module.
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfo(decl->fun(), script_, info_);
shared->set_is_toplevel(false);
- ParseInfo info(&zone, script_);
- info.set_shared_info(shared);
- info.set_toplevel(false);
- info.set_language_mode(decl->fun()->scope()->language_mode());
- info.set_allow_lazy_parsing(false);
- info.set_function_literal_id(shared->function_literal_id());
- info.set_ast_value_factory(ast_value_factory_);
- info.set_ast_value_factory_owned(false);
+ info.reset(new ParseInfo(script_));
+ info->set_shared_info(shared);
+ info->set_toplevel(false);
+ info->set_language_mode(decl->fun()->scope()->language_mode());
+ info->set_allow_lazy_parsing(false);
+ info->set_function_literal_id(shared->function_literal_id());
+ info->set_ast_value_factory(ast_value_factory_);
+ info->set_ast_value_factory_owned(false);
// Create fresh function scope to use to parse the function in.
- new_func_scope = new (info.zone()) DeclarationScope(
- info.zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
- info.set_asm_function_scope(new_func_scope);
- if (!Compiler::ParseAndAnalyze(&info)) {
+ new_func_scope = new (info->zone()) DeclarationScope(
+ info->zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
+ info->set_asm_function_scope(new_func_scope);
+ if (!Compiler::ParseAndAnalyze(info.get())) {
+ decl->fun()->scope()->outer_scope()->RemoveInnerScope(new_func_scope);
+ if (isolate_->has_pending_exception()) {
+ isolate_->clear_pending_exception();
+ }
+ typer_->TriggerParsingError();
typer_failed_ = true;
return;
}
- FunctionLiteral* func = info.literal();
+ FunctionLiteral* func = info->literal();
DCHECK_NOT_NULL(func);
decl->set_fun(func);
}
@@ -226,7 +239,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
}
if (scope_ == kFuncScope) {
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
+ BreakTarget);
RECURSE(VisitStatements(stmt->statements()));
} else {
RECURSE(VisitStatements(stmt->statements()));
@@ -239,10 +253,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public:
BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
- WasmOpcode opcode)
+ WasmOpcode opcode, TargetType target_type = NoTarget)
: builder_(builder) {
- builder_->breakable_blocks_.push_back(
- std::make_pair(stmt, opcode == kExprLoop));
+ builder_->breakable_blocks_.emplace_back(stmt, target_type);
// blocks and loops have a type immediate.
builder_->current_function_builder_->EmitWithU8(opcode, kLocalVoid);
}
@@ -290,9 +303,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitIfStatement(IfStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(stmt->condition()));
- current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
- // WASM ifs come with implement blocks for both arms.
- breakable_blocks_.push_back(std::make_pair(nullptr, false));
+ // Wasm ifs come with implicit blocks for both arms.
+ BlockVisitor block(this, nullptr, kExprIf);
if (stmt->HasThenStatement()) {
RECURSE(Visit(stmt->then_statement()));
}
@@ -300,18 +312,15 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->Emit(kExprElse);
RECURSE(Visit(stmt->else_statement()));
}
- current_function_builder_->Emit(kExprEnd);
- breakable_blocks_.pop_back();
}
- void DoBreakOrContinue(BreakableStatement* target, bool is_continue) {
+ void DoBreakOrContinue(BreakableStatement* target, TargetType type) {
DCHECK_EQ(kFuncScope, scope_);
for (int i = static_cast<int>(breakable_blocks_.size()) - 1; i >= 0; --i) {
auto elem = breakable_blocks_.at(i);
- if (elem.first == target && elem.second == is_continue) {
+ if (elem.first == target && elem.second == type) {
int block_distance = static_cast<int>(breakable_blocks_.size() - i - 1);
- current_function_builder_->Emit(kExprBr);
- current_function_builder_->EmitVarInt(block_distance);
+ current_function_builder_->EmitWithVarUint(kExprBr, block_distance);
return;
}
}
@@ -319,11 +328,11 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
void VisitContinueStatement(ContinueStatement* stmt) {
- DoBreakOrContinue(stmt->target(), true);
+ DoBreakOrContinue(stmt->target(), ContinueTarget);
}
void VisitBreakStatement(BreakStatement* stmt) {
- DoBreakOrContinue(stmt->target(), false);
+ DoBreakOrContinue(stmt->target(), BreakTarget);
}
void VisitReturnStatement(ReturnStatement* stmt) {
@@ -361,7 +370,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->Emit(kExprI32LtS);
current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
if_depth++;
- breakable_blocks_.push_back(std::make_pair(nullptr, false));
+ breakable_blocks_.emplace_back(nullptr, NoTarget);
HandleCase(node->left, case_to_block, tag, default_block, if_depth);
current_function_builder_->Emit(kExprElse);
}
@@ -371,7 +380,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->Emit(kExprI32GtS);
current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
if_depth++;
- breakable_blocks_.push_back(std::make_pair(nullptr, false));
+ breakable_blocks_.emplace_back(nullptr, NoTarget);
HandleCase(node->right, case_to_block, tag, default_block, if_depth);
current_function_builder_->Emit(kExprElse);
}
@@ -382,8 +391,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
DCHECK(case_to_block.find(node->begin) != case_to_block.end());
current_function_builder_->Emit(kExprBr);
- current_function_builder_->EmitVarInt(1 + if_depth +
- case_to_block[node->begin]);
+ current_function_builder_->EmitVarUint(1 + if_depth +
+ case_to_block[node->begin]);
current_function_builder_->Emit(kExprEnd);
} else {
if (node->begin != 0) {
@@ -394,21 +403,21 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
VisitVariableProxy(tag);
}
current_function_builder_->Emit(kExprBrTable);
- current_function_builder_->EmitVarInt(node->end - node->begin + 1);
+ current_function_builder_->EmitVarUint(node->end - node->begin + 1);
for (int v = node->begin; v <= node->end; ++v) {
if (case_to_block.find(v) != case_to_block.end()) {
uint32_t target = if_depth + case_to_block[v];
- current_function_builder_->EmitVarInt(target);
+ current_function_builder_->EmitVarUint(target);
} else {
uint32_t target = if_depth + default_block;
- current_function_builder_->EmitVarInt(target);
+ current_function_builder_->EmitVarUint(target);
}
if (v == kMaxInt) {
break;
}
}
uint32_t target = if_depth + default_block;
- current_function_builder_->EmitVarInt(target);
+ current_function_builder_->EmitVarUint(target);
}
while (if_depth-- != prev_if_depth) {
@@ -425,7 +434,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (case_count == 0) {
return;
}
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
+ BreakTarget);
ZoneVector<BlockVisitor*> blocks(zone_);
ZoneVector<int32_t> cases(zone_);
ZoneMap<int, unsigned int> case_to_block(zone_);
@@ -455,7 +465,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (root->left != nullptr || root->right != nullptr ||
root->begin == root->end) {
current_function_builder_->Emit(kExprBr);
- current_function_builder_->EmitVarInt(default_block);
+ current_function_builder_->EmitVarUint(default_block);
}
}
for (int i = 0; i < case_count; ++i) {
@@ -471,26 +481,28 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitDoWhileStatement(DoWhileStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
- BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
+ BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
+ BreakTarget);
BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
- RECURSE(Visit(stmt->body()));
+ {
+ BlockVisitor inner_block(this, stmt->AsBreakableStatement(), kExprBlock,
+ ContinueTarget);
+ RECURSE(Visit(stmt->body()));
+ }
RECURSE(Visit(stmt->cond()));
- current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
- current_function_builder_->EmitWithU8(kExprBr, 1);
- current_function_builder_->Emit(kExprEnd);
+ current_function_builder_->EmitWithU8(kExprBrIf, 0);
}
void VisitWhileStatement(WhileStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
- BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
- BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
+ BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
+ BreakTarget);
+ BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop,
+ ContinueTarget);
RECURSE(Visit(stmt->cond()));
- breakable_blocks_.push_back(std::make_pair(nullptr, false));
- current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
+ BlockVisitor if_block(this, nullptr, kExprIf);
RECURSE(Visit(stmt->body()));
current_function_builder_->EmitWithU8(kExprBr, 1);
- current_function_builder_->Emit(kExprEnd);
- breakable_blocks_.pop_back();
}
void VisitForStatement(ForStatement* stmt) {
@@ -498,8 +510,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (stmt->init() != nullptr) {
RECURSE(Visit(stmt->init()));
}
- BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
- BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
+ BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
+ BreakTarget);
+ BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop,
+ ContinueTarget);
if (stmt->cond() != nullptr) {
RECURSE(Visit(stmt->cond()));
current_function_builder_->Emit(kExprI32Eqz);
@@ -557,8 +571,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitConditional(Conditional* expr) {
DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(expr->condition()));
- // WASM ifs come with implicit blocks for both arms.
- breakable_blocks_.push_back(std::make_pair(nullptr, false));
+ // Wasm ifs come with implicit blocks for both arms.
+ breakable_blocks_.emplace_back(nullptr, NoTarget);
ValueTypeCode type;
switch (TypeOf(expr)) {
case kWasmI32:
@@ -645,7 +659,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ValueType var_type = TypeOf(expr);
DCHECK_NE(kWasmStmt, var_type);
if (var->IsContextSlot()) {
- current_function_builder_->EmitWithVarInt(
+ current_function_builder_->EmitWithVarUint(
kExprGetGlobal, LookupOrInsertGlobal(var, var_type));
} else {
current_function_builder_->EmitGetLocal(
@@ -671,35 +685,26 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (type->IsA(AsmType::Signed())) {
int32_t i = 0;
- if (!value->ToInt32(&i)) {
- UNREACHABLE();
- }
- byte code[] = {WASM_I32V(i)};
- current_function_builder_->EmitCode(code, sizeof(code));
+ CHECK(value->ToInt32(&i));
+ current_function_builder_->EmitI32Const(i);
} else if (type->IsA(AsmType::Unsigned()) || type->IsA(AsmType::FixNum())) {
uint32_t u = 0;
- if (!value->ToUint32(&u)) {
- UNREACHABLE();
- }
- int32_t i = static_cast<int32_t>(u);
- byte code[] = {WASM_I32V(i)};
- current_function_builder_->EmitCode(code, sizeof(code));
+ CHECK(value->ToUint32(&u));
+ current_function_builder_->EmitI32Const(bit_cast<int32_t>(u));
} else if (type->IsA(AsmType::Int())) {
// The parser can collapse !0, !1, etc. to true / false.
// Allow these as int literals.
if (expr->raw_value()->IsTrue()) {
- byte code[] = {WASM_I32V(1)};
+ byte code[] = {WASM_ONE};
current_function_builder_->EmitCode(code, sizeof(code));
} else if (expr->raw_value()->IsFalse()) {
- byte code[] = {WASM_I32V(0)};
+ byte code[] = {WASM_ZERO};
current_function_builder_->EmitCode(code, sizeof(code));
} else if (expr->raw_value()->IsNumber()) {
// This can happen when -x becomes x * -1 (due to the parser).
int32_t i = 0;
- if (!value->ToInt32(&i) || i != -1) {
- UNREACHABLE();
- }
- byte code[] = {WASM_I32V(i)};
+ CHECK(value->ToInt32(&i) && i == -1);
+ byte code[] = {WASM_I32V_1(-1)};
current_function_builder_->EmitCode(code, sizeof(code));
} else {
UNREACHABLE();
@@ -949,9 +954,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DCHECK_NE(kWasmStmt, var_type);
if (var->IsContextSlot()) {
uint32_t index = LookupOrInsertGlobal(var, var_type);
- current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
+ current_function_builder_->EmitWithVarUint(kExprSetGlobal, index);
if (fate == kLeaveOnStack) {
- current_function_builder_->EmitWithVarInt(kExprGetGlobal, index);
+ current_function_builder_->EmitWithVarUint(kExprGetGlobal, index);
}
} else {
if (fate == kDrop) {
@@ -1461,7 +1466,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
int parent_pos = returns_value ? parent_binop->position() : pos;
current_function_builder_->AddAsmWasmOffset(pos, parent_pos);
current_function_builder_->Emit(kExprCallFunction);
- current_function_builder_->EmitVarInt(index);
+ current_function_builder_->EmitVarUint(index);
} else {
WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
VisitCallArgs(expr);
@@ -1495,8 +1500,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->AddAsmWasmOffset(expr->position(),
expr->position());
current_function_builder_->Emit(kExprCallIndirect);
- current_function_builder_->EmitVarInt(indices->signature_index);
- current_function_builder_->EmitVarInt(0); // table index
+ current_function_builder_->EmitVarUint(indices->signature_index);
+ current_function_builder_->EmitVarUint(0); // table index
returns_value =
builder_->GetSignature(indices->signature_index)->return_count() >
0;
@@ -1964,7 +1969,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
AsmTyper* typer_;
bool typer_failed_;
bool typer_finished_;
- ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
+ ZoneVector<std::pair<BreakableStatement*, TargetType>> breakable_blocks_;
ZoneVector<ForeignVariable> foreign_variables_;
WasmFunctionBuilder* init_function_;
WasmFunctionBuilder* foreign_init_function_;
@@ -1988,6 +1993,9 @@ AsmWasmBuilder::AsmWasmBuilder(CompilationInfo* info)
// TODO(aseemgarg): probably should take zone (to write wasm to) as input so
// that zone in constructor may be thrown away once wasm module is written.
AsmWasmBuilder::Result AsmWasmBuilder::Run(Handle<FixedArray>* foreign_args) {
+ HistogramTimerScope asm_wasm_time_scope(
+ info_->isolate()->counters()->asm_wasm_translation_time());
+
Zone* zone = info_->zone();
AsmWasmBuilderImpl impl(info_->isolate(), zone, info_,
info_->parse_info()->ast_value_factory(),
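
For reference, a sketch of the control structure the reworked VisitDoWhileStatement emits for `do { body } while (cond);`, with the break target on the outer block and the continue target on the inner one. This is a hand-written trace via a toy emitter (not WasmFunctionBuilder), reproducing the opcode order of the new code:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> code;
  code.push_back("block");    // BreakTarget: `break` lands after this block
  code.push_back("loop");     // branching to this label re-runs the body
  code.push_back("block");    // ContinueTarget: `continue` lands at its end
  code.push_back("<body>");
  code.push_back("end");      // closes the continue block
  code.push_back("<cond>");
  code.push_back("br_if 0");  // depth 0 = the loop; replaces `if; br 1; end`
  code.push_back("end");      // closes the loop
  code.push_back("end");      // closes the break block
  for (const std::string& op : code) std::printf("%s\n", op.c_str());
  return 0;
}

The single `br_if 0` is the visible win: the old lowering spent three instructions (`if`, `br 1`, `end`) and an extra breakable-block entry on the same backward edge.
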
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index a4d97ec3e6..d945cc4c3e 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -235,17 +235,6 @@ unsigned CpuFeatures::icache_line_size_ = 0;
unsigned CpuFeatures::dcache_line_size_ = 0;
// -----------------------------------------------------------------------------
-// Implementation of Label
-
-int Label::pos() const {
- if (pos_ < 0) return -pos_ - 1;
- if (pos_ > 0) return pos_ - 1;
- UNREACHABLE();
- return 0;
-}
-
-
-// -----------------------------------------------------------------------------
// Implementation of RelocInfoWriter and RelocIterator
//
// Relocation information is written backwards in memory, from high addresses
@@ -319,25 +308,25 @@ const int kCodeWithIdTag = 0;
const int kDeoptReasonTag = 1;
void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
- if (IsWasmMemoryReference(rmode_)) {
- Address updated_reference;
- DCHECK_GE(wasm_memory_reference(), old_base);
- updated_reference = new_base + (wasm_memory_reference() - old_base);
- // The reference is not checked here but at runtime. Validity of references
- // may change over time.
- unchecked_update_wasm_memory_reference(updated_reference,
- icache_flush_mode);
- } else if (IsWasmMemorySizeReference(rmode_)) {
- uint32_t current_size_reference = wasm_memory_size_reference();
- uint32_t updated_size_reference =
- new_size + (current_size_reference - old_size);
- unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
- } else {
- UNREACHABLE();
+ Address old_base, Address new_base, ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK_GE(wasm_memory_reference(), old_base);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ // The reference is not checked here but at runtime. Validity of references
+ // may change over time.
+ unchecked_update_wasm_memory_reference(updated_reference, icache_flush_mode);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
}
+}
+
+void RelocInfo::update_wasm_memory_size(uint32_t old_size, uint32_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ uint32_t current_size_reference = wasm_memory_size_reference();
+ uint32_t updated_size_reference =
+ new_size + (current_size_reference - old_size);
+ unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
}
@@ -488,7 +477,8 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
- RelocInfo::IsDeoptPosition(rmode)) {
+ RelocInfo::IsDeoptPosition(rmode) ||
+ RelocInfo::IsWasmProtectedLanding(rmode)) {
WriteIntData(static_cast<int>(rinfo->data()));
}
}
@@ -637,7 +627,8 @@ void RelocIterator::next() {
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDeoptId(rmode) ||
- RelocInfo::IsDeoptPosition(rmode)) {
+ RelocInfo::IsDeoptPosition(rmode) ||
+ RelocInfo::IsWasmProtectedLanding(rmode)) {
if (SetMode(rmode)) {
AdvanceReadInt();
return;
@@ -734,8 +725,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "no reloc 64";
case EMBEDDED_OBJECT:
return "embedded object";
- case DEBUGGER_STATEMENT:
- return "debugger statement";
case CODE_TARGET:
return "code target";
case CODE_TARGET_WITH_ID:
@@ -782,6 +771,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "wasm global value reference";
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
return "wasm function table size reference";
+ case WASM_PROTECTED_INSTRUCTION_LANDING:
+ return "wasm protected instruction landing";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@@ -841,7 +832,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case CELL:
Object::VerifyPointer(target_cell());
break;
- case DEBUGGER_STATEMENT:
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
@@ -880,6 +870,8 @@ void RelocInfo::Verify(Isolate* isolate) {
case WASM_MEMORY_SIZE_REFERENCE:
case WASM_GLOBAL_REFERENCE:
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
+ case WASM_PROTECTED_INSTRUCTION_LANDING:
+ // TODO(eholk): make sure the protected instruction is in range.
case NONE32:
case NONE64:
break;
@@ -1575,8 +1567,9 @@ ExternalReference ExternalReference::is_tail_call_elimination_enabled_address(
return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
}
-ExternalReference ExternalReference::promise_hook_address(Isolate* isolate) {
- return ExternalReference(isolate->promise_hook_address());
+ExternalReference ExternalReference::promise_hook_or_debug_is_active_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->promise_hook_or_debug_is_active_address());
}
ExternalReference ExternalReference::debug_is_active_address(
@@ -1589,12 +1582,6 @@ ExternalReference ExternalReference::debug_hook_on_function_call_address(
return ExternalReference(isolate->debug()->hook_on_function_call_address());
}
-ExternalReference ExternalReference::debug_after_break_target_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->after_break_target_address());
-}
-
-
ExternalReference ExternalReference::runtime_function_table_address(
Isolate* isolate) {
return ExternalReference(
@@ -1675,6 +1662,11 @@ ExternalReference ExternalReference::debug_suspended_generator_address(
return ExternalReference(isolate->debug()->suspended_generator_address());
}
+ExternalReference ExternalReference::debug_restart_fp_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug()->restart_fp_address());
+}
+
ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
return ExternalReference(reinterpret_cast<void*>(
FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
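
The split above leaves update_wasm_memory_reference handling only base-pointer rebasing, with size-constant adjustment moved into update_wasm_memory_size. A standalone sketch of the two adjustments, using plain integers in place of Address values and pc patching:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Rebase a pointer into the wasm heap: keep its offset, swap the base.
uintptr_t UpdateMemoryReference(uintptr_t ref, uintptr_t old_base,
                                uintptr_t new_base) {
  assert(ref >= old_base);  // mirrors the DCHECK_GE in the real code
  return new_base + (ref - old_base);
}

// Adjust an embedded size constant by however much the memory grew or shrank.
uint32_t UpdateMemorySize(uint32_t ref, uint32_t old_size, uint32_t new_size) {
  return new_size + (ref - old_size);
}

int main() {
  // A reference 0x40 bytes into the old memory is 0x40 bytes into the new.
  std::printf("%#zx\n",
              static_cast<size_t>(UpdateMemoryReference(0x1040, 0x1000, 0x8000)));
  // A bounds-check constant tracks the memory size delta.
  std::printf("%u\n", UpdateMemorySize(64 * 1024, 64 * 1024, 128 * 1024));
  return 0;
}
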
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index cd5867689e..856072f127 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -40,6 +40,7 @@
#include "src/deoptimize-reason.h"
#include "src/globals.h"
#include "src/isolate.h"
+#include "src/label.h"
#include "src/log.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -272,79 +273,6 @@ class CpuFeatures : public AllStatic {
};
-// -----------------------------------------------------------------------------
-// Labels represent pc locations; they are typically jump or call targets.
-// After declaration, a label can be freely used to denote known or (yet)
-// unknown pc location. Assembler::bind() is used to bind a label to the
-// current pc. A label can be bound only once.
-
-class Label {
- public:
- enum Distance {
- kNear, kFar
- };
-
- INLINE(Label()) {
- Unuse();
- UnuseNear();
- }
-
- INLINE(~Label()) {
- DCHECK(!is_linked());
- DCHECK(!is_near_linked());
- }
-
- INLINE(void Unuse()) { pos_ = 0; }
- INLINE(void UnuseNear()) { near_link_pos_ = 0; }
-
- INLINE(bool is_bound() const) { return pos_ < 0; }
- INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
- INLINE(bool is_linked() const) { return pos_ > 0; }
- INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
-
- // Returns the position of bound or linked labels. Cannot be used
- // for unused labels.
- int pos() const;
- int near_link_pos() const { return near_link_pos_ - 1; }
-
- private:
- // pos_ encodes both the binding state (via its sign)
- // and the binding position (via its value) of a label.
- //
- // pos_ < 0 bound label, pos() returns the jump target position
- // pos_ == 0 unused label
- // pos_ > 0 linked label, pos() returns the last reference position
- int pos_;
-
- // Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
- int near_link_pos_;
-
- void bind_to(int pos) {
- pos_ = -pos - 1;
- DCHECK(is_bound());
- }
- void link_to(int pos, Distance distance = kFar) {
- if (distance == kNear) {
- near_link_pos_ = pos + 1;
- DCHECK(is_near_linked());
- } else {
- pos_ = pos + 1;
- DCHECK(is_linked());
- }
- }
-
- friend class Assembler;
- friend class Displacement;
- friend class RegExpMacroAssemblerIrregexp;
-
-#if V8_TARGET_ARCH_ARM64
- // On ARM64, the Assembler keeps track of pointers to Labels to resolve
- // branches to distant targets. Copying labels would confuse the Assembler.
- DISALLOW_COPY_AND_ASSIGN(Label); // NOLINT
-#endif
-};
-
-
enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
enum ArgvMode { kArgvOnStack, kArgvInRegister };
@@ -389,13 +317,13 @@ class RelocInfo {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
- DEBUGGER_STATEMENT, // Code target for the debugger statement.
EMBEDDED_OBJECT,
// To relocate pointers into the wasm memory embedded in wasm code
WASM_MEMORY_REFERENCE,
WASM_GLOBAL_REFERENCE,
WASM_MEMORY_SIZE_REFERENCE,
WASM_FUNCTION_TABLE_SIZE_REFERENCE,
+ WASM_PROTECTED_INSTRUCTION_LANDING,
CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
@@ -437,7 +365,7 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
- LAST_CODE_ENUM = DEBUGGER_STATEMENT,
+ LAST_CODE_ENUM = CODE_TARGET_WITH_ID,
LAST_GCED_ENUM = WASM_FUNCTION_TABLE_SIZE_REFERENCE,
FIRST_SHAREABLE_RELOC_MODE = CELL,
};
@@ -513,9 +441,6 @@ class RelocInfo {
static inline bool IsDebugBreakSlotAtTailCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
}
- static inline bool IsDebuggerStatement(Mode mode) {
- return mode == DEBUGGER_STATEMENT;
- }
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
@@ -546,6 +471,9 @@ class RelocInfo {
static inline bool IsWasmPtrReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE;
}
+ static inline bool IsWasmProtectedLanding(Mode mode) {
+ return mode == WASM_PROTECTED_INSTRUCTION_LANDING;
+ }
static inline int ModeMask(Mode mode) { return 1 << mode; }
@@ -578,7 +506,10 @@ class RelocInfo {
uint32_t wasm_function_table_size_reference();
uint32_t wasm_memory_size_reference();
void update_wasm_memory_reference(
- Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+ Address old_base, Address new_base,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void update_wasm_memory_size(
+ uint32_t old_size, uint32_t new_size,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_global_reference(
Address old_base, Address new_base,
@@ -1069,7 +1000,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference invoke_function_callback(Isolate* isolate);
static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
- static ExternalReference promise_hook_address(Isolate* isolate);
+ static ExternalReference promise_hook_or_debug_is_active_address(
+ Isolate* isolate);
V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
Isolate* isolate);
@@ -1082,6 +1014,9 @@ class ExternalReference BASE_EMBEDDED {
// Used to check for a suspended generator; used for stepping across await calls.
static ExternalReference debug_suspended_generator_address(Isolate* isolate);
+ // Used to store the frame pointer to drop to when restarting a frame.
+ static ExternalReference debug_restart_fp_address(Isolate* isolate);
+
#ifndef V8_INTERPRETED_REGEXP
// C functions called from RegExp generated code.
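
The Label class deleted above moves to the new src/label.h (hence the added #include). Its trick is packing both binding state and position into a single int via the sign, with a +/-1 offset so position 0 stays distinguishable from the unused state. A standalone sketch condensed from the removed lines:

#include <cassert>
#include <cstdio>

// pos_ < 0: bound (target position); pos_ == 0: unused;
// pos_ > 0: linked (last reference position).
class Label {
 public:
  bool is_bound() const { return pos_ < 0; }
  bool is_unused() const { return pos_ == 0; }
  bool is_linked() const { return pos_ > 0; }

  void bind_to(int pos) { pos_ = -pos - 1; assert(is_bound()); }
  void link_to(int pos) { pos_ = pos + 1; assert(is_linked()); }

  // Decode either state back to a plain position; invalid when unused.
  int pos() const {
    if (pos_ < 0) return -pos_ - 1;
    assert(pos_ > 0);
    return pos_ - 1;
  }

 private:
  int pos_ = 0;  // starts unused
};

int main() {
  Label l;
  l.link_to(0);  // position 0 is representable: stored as +1
  std::printf("linked at %d\n", l.pos());
  l.bind_to(42);  // stored as -43, decoded back to 42
  std::printf("bound at %d\n", l.pos());
  return 0;
}
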
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/assert-scope.cc
index f446ad0895..8754cca5d5 100644
--- a/deps/v8/src/assert-scope.cc
+++ b/deps/v8/src/assert-scope.cc
@@ -6,7 +6,6 @@
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
-#include "src/debug/debug.h"
#include "src/isolate.h"
#include "src/utils.h"
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index b4e1473f83..16e048accd 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -5,5 +5,6 @@ bmeurer@chromium.org
littledan@chromium.org
marja@chromium.org
mstarzinger@chromium.org
+neis@chromium.org
rossberg@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/ast/ast-literal-reindexer.cc b/deps/v8/src/ast/ast-literal-reindexer.cc
deleted file mode 100644
index 67e180fe42..0000000000
--- a/deps/v8/src/ast/ast-literal-reindexer.cc
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ast/ast-literal-reindexer.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-void AstLiteralReindexer::VisitVariableDeclaration(VariableDeclaration* node) {
- VisitVariableProxy(node->proxy());
-}
-
-
-void AstLiteralReindexer::VisitEmptyStatement(EmptyStatement* node) {}
-
-
-void AstLiteralReindexer::VisitSloppyBlockFunctionStatement(
- SloppyBlockFunctionStatement* node) {
- Visit(node->statement());
-}
-
-
-void AstLiteralReindexer::VisitContinueStatement(ContinueStatement* node) {}
-
-
-void AstLiteralReindexer::VisitBreakStatement(BreakStatement* node) {}
-
-
-void AstLiteralReindexer::VisitDebuggerStatement(DebuggerStatement* node) {}
-
-
-void AstLiteralReindexer::VisitNativeFunctionLiteral(
- NativeFunctionLiteral* node) {}
-
-
-void AstLiteralReindexer::VisitDoExpression(DoExpression* node) {
- Visit(node->block());
- Visit(node->result());
-}
-
-
-void AstLiteralReindexer::VisitLiteral(Literal* node) {}
-
-
-void AstLiteralReindexer::VisitRegExpLiteral(RegExpLiteral* node) {
- UpdateIndex(node);
-}
-
-
-void AstLiteralReindexer::VisitVariableProxy(VariableProxy* node) {}
-
-
-void AstLiteralReindexer::VisitThisFunction(ThisFunction* node) {}
-
-
-void AstLiteralReindexer::VisitSuperPropertyReference(
- SuperPropertyReference* node) {
- Visit(node->this_var());
- Visit(node->home_object());
-}
-
-
-void AstLiteralReindexer::VisitSuperCallReference(SuperCallReference* node) {
- Visit(node->this_var());
- Visit(node->new_target_var());
- Visit(node->this_function_var());
-}
-
-
-void AstLiteralReindexer::VisitRewritableExpression(
- RewritableExpression* node) {
- Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitExpressionStatement(ExpressionStatement* node) {
- Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitReturnStatement(ReturnStatement* node) {
- Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitYield(Yield* node) {
- Visit(node->generator_object());
- Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitThrow(Throw* node) { Visit(node->exception()); }
-
-
-void AstLiteralReindexer::VisitUnaryOperation(UnaryOperation* node) {
- Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitCountOperation(CountOperation* node) {
- Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitBlock(Block* node) {
- VisitStatements(node->statements());
-}
-
-
-void AstLiteralReindexer::VisitFunctionDeclaration(FunctionDeclaration* node) {
- VisitVariableProxy(node->proxy());
- VisitFunctionLiteral(node->fun());
-}
-
-
-void AstLiteralReindexer::VisitCallRuntime(CallRuntime* node) {
- VisitArguments(node->arguments());
-}
-
-
-void AstLiteralReindexer::VisitWithStatement(WithStatement* node) {
- Visit(node->expression());
- Visit(node->statement());
-}
-
-
-void AstLiteralReindexer::VisitDoWhileStatement(DoWhileStatement* node) {
- Visit(node->body());
- Visit(node->cond());
-}
-
-
-void AstLiteralReindexer::VisitWhileStatement(WhileStatement* node) {
- Visit(node->cond());
- Visit(node->body());
-}
-
-
-void AstLiteralReindexer::VisitTryCatchStatement(TryCatchStatement* node) {
- Visit(node->try_block());
- Visit(node->catch_block());
-}
-
-
-void AstLiteralReindexer::VisitTryFinallyStatement(TryFinallyStatement* node) {
- Visit(node->try_block());
- Visit(node->finally_block());
-}
-
-
-void AstLiteralReindexer::VisitProperty(Property* node) {
- Visit(node->key());
- Visit(node->obj());
-}
-
-
-void AstLiteralReindexer::VisitAssignment(Assignment* node) {
- Visit(node->target());
- Visit(node->value());
-}
-
-
-void AstLiteralReindexer::VisitBinaryOperation(BinaryOperation* node) {
- Visit(node->left());
- Visit(node->right());
-}
-
-
-void AstLiteralReindexer::VisitCompareOperation(CompareOperation* node) {
- Visit(node->left());
- Visit(node->right());
-}
-
-
-void AstLiteralReindexer::VisitSpread(Spread* node) {
- // This is reachable because ParserBase::ParseArrowFunctionLiteral calls
- // ReindexLiterals before calling RewriteDestructuringAssignments.
- Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitEmptyParentheses(EmptyParentheses* node) {}
-
-void AstLiteralReindexer::VisitGetIterator(GetIterator* node) {
- Visit(node->iterable());
-}
-
-void AstLiteralReindexer::VisitForInStatement(ForInStatement* node) {
- Visit(node->each());
- Visit(node->enumerable());
- Visit(node->body());
-}
-
-
-void AstLiteralReindexer::VisitForOfStatement(ForOfStatement* node) {
- Visit(node->assign_iterator());
- Visit(node->next_result());
- Visit(node->result_done());
- Visit(node->assign_each());
- Visit(node->body());
-}
-
-
-void AstLiteralReindexer::VisitConditional(Conditional* node) {
- Visit(node->condition());
- Visit(node->then_expression());
- Visit(node->else_expression());
-}
-
-
-void AstLiteralReindexer::VisitIfStatement(IfStatement* node) {
- Visit(node->condition());
- Visit(node->then_statement());
- if (node->HasElseStatement()) {
- Visit(node->else_statement());
- }
-}
-
-
-void AstLiteralReindexer::VisitSwitchStatement(SwitchStatement* node) {
- Visit(node->tag());
- ZoneList<CaseClause*>* cases = node->cases();
- for (int i = 0; i < cases->length(); i++) {
- VisitCaseClause(cases->at(i));
- }
-}
-
-
-void AstLiteralReindexer::VisitCaseClause(CaseClause* node) {
- if (!node->is_default()) Visit(node->label());
- VisitStatements(node->statements());
-}
-
-
-void AstLiteralReindexer::VisitForStatement(ForStatement* node) {
- if (node->init() != NULL) Visit(node->init());
- if (node->cond() != NULL) Visit(node->cond());
- if (node->next() != NULL) Visit(node->next());
- Visit(node->body());
-}
-
-
-void AstLiteralReindexer::VisitClassLiteral(ClassLiteral* node) {
- if (node->extends()) Visit(node->extends());
- if (node->constructor()) Visit(node->constructor());
- if (node->class_variable_proxy()) {
- VisitVariableProxy(node->class_variable_proxy());
- }
- for (int i = 0; i < node->properties()->length(); i++) {
- VisitLiteralProperty(node->properties()->at(i));
- }
-}
-
-void AstLiteralReindexer::VisitObjectLiteral(ObjectLiteral* node) {
- UpdateIndex(node);
- for (int i = 0; i < node->properties()->length(); i++) {
- VisitLiteralProperty(node->properties()->at(i));
- }
-}
-
-void AstLiteralReindexer::VisitLiteralProperty(LiteralProperty* node) {
- Visit(node->key());
- Visit(node->value());
-}
-
-
-void AstLiteralReindexer::VisitArrayLiteral(ArrayLiteral* node) {
- UpdateIndex(node);
- for (int i = 0; i < node->values()->length(); i++) {
- Visit(node->values()->at(i));
- }
-}
-
-
-void AstLiteralReindexer::VisitCall(Call* node) {
- Visit(node->expression());
- VisitArguments(node->arguments());
-}
-
-
-void AstLiteralReindexer::VisitCallNew(CallNew* node) {
- Visit(node->expression());
- VisitArguments(node->arguments());
-}
-
-
-void AstLiteralReindexer::VisitStatements(ZoneList<Statement*>* statements) {
- if (statements == NULL) return;
- for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
- }
-}
-
-
-void AstLiteralReindexer::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
- for (int i = 0; i < declarations->length(); i++) {
- Visit(declarations->at(i));
- }
-}
-
-
-void AstLiteralReindexer::VisitArguments(ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- Visit(arguments->at(i));
- }
-}
-
-
-void AstLiteralReindexer::VisitFunctionLiteral(FunctionLiteral* node) {
- // We don't recurse into the declarations or body of the function literal:
-}
-
-void AstLiteralReindexer::Reindex(Expression* pattern) { Visit(pattern); }
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ast/ast-literal-reindexer.h b/deps/v8/src/ast/ast-literal-reindexer.h
deleted file mode 100644
index 4e0ca6bef5..0000000000
--- a/deps/v8/src/ast/ast-literal-reindexer.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_AST_LITERAL_REINDEXER
-#define V8_AST_AST_LITERAL_REINDEXER
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-
-namespace v8 {
-namespace internal {
-
-class AstLiteralReindexer final : public AstVisitor<AstLiteralReindexer> {
- public:
- AstLiteralReindexer() : next_index_(0) {}
-
- int count() const { return next_index_; }
- void Reindex(Expression* pattern);
-
- private:
-#define DEFINE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
- void VisitStatements(ZoneList<Statement*>* statements);
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
- void VisitArguments(ZoneList<Expression*>* arguments);
- void VisitLiteralProperty(LiteralProperty* property);
-
- void UpdateIndex(MaterializedLiteral* literal) {
- literal->literal_index_ = next_index_++;
- }
-
- int next_index_;
-
- DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
- DISALLOW_COPY_AND_ASSIGN(AstLiteralReindexer);
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_AST_LITERAL_REINDEXER
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 49040f7841..499760de14 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -21,6 +21,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
next_id_(BailoutId::FirstUsable().ToInt()),
yield_count_(0),
properties_(zone),
+ language_mode_(SLOPPY),
slot_cache_(zone),
disable_crankshaft_reason_(kNoReason),
dont_optimize_reason_(kNoReason),
@@ -36,10 +37,12 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
+ void VisitVariableProxy(VariableProxy* node, TypeofMode typeof_mode);
void VisitVariableProxyReference(VariableProxy* node);
void VisitPropertyReference(Property* node);
void VisitReference(Expression* expr);
+ void VisitStatementsAndDeclarations(Block* node);
void VisitStatements(ZoneList<Statement*>* statements);
void VisitDeclarations(Declaration::List* declarations);
void VisitArguments(ZoneList<Expression*>* arguments);
@@ -66,9 +69,23 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
- node->AssignFeedbackVectorSlots(properties_.get_spec(), &slot_cache_);
+ node->AssignFeedbackSlots(properties_.get_spec(), language_mode_,
+ &slot_cache_);
}
+ class LanguageModeScope {
+ public:
+ LanguageModeScope(AstNumberingVisitor* visitor, LanguageMode language_mode)
+ : visitor_(visitor), outer_language_mode_(visitor->language_mode_) {
+ visitor_->language_mode_ = language_mode;
+ }
+ ~LanguageModeScope() { visitor_->language_mode_ = outer_language_mode_; }
+
+ private:
+ AstNumberingVisitor* visitor_;
+ LanguageMode outer_language_mode_;
+ };
+
BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
Zone* zone() const { return zone_; }
@@ -78,8 +95,9 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
int next_id_;
int yield_count_;
AstProperties properties_;
- // The slot cache allows us to reuse certain feedback vector slots.
- FeedbackVectorSlotCache slot_cache_;
+ LanguageMode language_mode_;
+ // The slot cache allows us to reuse certain feedback slots.
+ FeedbackSlotCache slot_cache_;
BailoutReason disable_crankshaft_reason_;
BailoutReason dont_optimize_reason_;
HandlerTable::CatchPrediction catch_prediction_;
@@ -119,8 +137,7 @@ void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
IncrementNodeCount();
- DisableOptimization(kDebuggerStatement);
- node->set_base_id(ReserveIdRange(DebuggerStatement::num_ids()));
+ DisableFullCodegenAndCrankshaft(kDebuggerStatement);
}
@@ -150,6 +167,7 @@ void AstNumberingVisitor::VisitLiteral(Literal* node) {
void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(RegExpLiteral::num_ids()));
+ ReserveFeedbackSlots(node);
}
@@ -169,10 +187,14 @@ void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
}
+void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node,
+ TypeofMode typeof_mode) {
+ VisitVariableProxyReference(node);
+ node->AssignFeedbackSlots(properties_.get_spec(), typeof_mode, &slot_cache_);
+}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
- VisitVariableProxyReference(node);
- ReserveFeedbackSlots(node);
+ VisitVariableProxy(node, NOT_INSIDE_TYPEOF);
}
@@ -237,7 +259,12 @@ void AstNumberingVisitor::VisitThrow(Throw* node) {
void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(UnaryOperation::num_ids()));
- Visit(node->expression());
+ if ((node->op() == Token::TYPEOF) && node->expression()->IsVariableProxy()) {
+ VariableProxy* proxy = node->expression()->AsVariableProxy();
+ VisitVariableProxy(proxy, INSIDE_TYPEOF);
+ } else {
+ Visit(node->expression());
+ }
}
@@ -252,10 +279,21 @@ void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
void AstNumberingVisitor::VisitBlock(Block* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Block::num_ids()));
- if (node->scope() != NULL) VisitDeclarations(node->scope()->declarations());
- VisitStatements(node->statements());
+ Scope* scope = node->scope();
+ if (scope != nullptr) {
+ LanguageModeScope language_mode_scope(this, scope->language_mode());
+ VisitStatementsAndDeclarations(node);
+ } else {
+ VisitStatementsAndDeclarations(node);
+ }
}
+void AstNumberingVisitor::VisitStatementsAndDeclarations(Block* node) {
+ Scope* scope = node->scope();
+ DCHECK(scope == nullptr || !scope->HasBeenRemoved());
+ if (scope) VisitDeclarations(scope->declarations());
+ VisitStatements(node->statements());
+}
void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
IncrementNodeCount();
@@ -323,6 +361,7 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
+ DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kTryCatchStatement);
{
@@ -406,8 +445,8 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
void AstNumberingVisitor::VisitSpread(Spread* node) {
IncrementNodeCount();
- // We can only get here from super calls currently.
- DisableFullCodegenAndCrankshaft(kSuperReference);
+ // We can only get here from spread calls currently.
+ DisableFullCodegenAndCrankshaft(kSpreadCall);
node->set_base_id(ReserveIdRange(Spread::num_ids()));
Visit(node->expression());
}
@@ -595,12 +634,19 @@ void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(FunctionLiteral::num_ids()));
- if (eager_literals_ && node->ShouldEagerCompile()) {
- eager_literals_->Add(new (zone())
- ThreadedListZoneEntry<FunctionLiteral*>(node));
+ if (node->ShouldEagerCompile()) {
+ if (eager_literals_) {
+ eager_literals_->Add(new (zone())
+ ThreadedListZoneEntry<FunctionLiteral*>(node));
+ }
+
+ // If the function literal is being eagerly compiled, recurse into the
+ // declarations and body of the function literal.
+ if (!AstNumbering::Renumber(stack_limit_, zone_, node, eager_literals_)) {
+ SetStackOverflow();
+ return;
+ }
}
- // We don't recurse into the declarations or body of the function literal:
- // you have to separately Renumber() each FunctionLiteral that you compile.
ReserveFeedbackSlots(node);
}
@@ -615,6 +661,8 @@ void AstNumberingVisitor::VisitRewritableExpression(
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DeclarationScope* scope = node->scope();
+ DCHECK(!scope->HasBeenRemoved());
+
if (scope->new_target_var() != nullptr ||
scope->this_function_var() != nullptr) {
DisableFullCodegenAndCrankshaft(kSuperReference);
@@ -637,6 +685,8 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DisableFullCodegenAndCrankshaft(kClassConstructorFunction);
}
+ LanguageModeScope language_mode_scope(this, node->language_mode());
+
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
@@ -646,6 +696,13 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
if (FLAG_trace_opt) {
if (disable_crankshaft_reason_ != kNoReason) {
+ // TODO(leszeks): This is a quick'n'dirty fix to allow the debug name of
+ // the function to be accessed in the below print. This DCHECK will fail
+ // if we move ast numbering off the main thread, but that won't be before
+ // we remove FCG, in which case this entire check isn't necessary anyway.
+ AllowHandleDereference allow_deref;
+ DCHECK(!node->debug_name().is_null());
+
PrintF("[enforcing Ignition and TurboFan for %s because: %s\n",
node->debug_name()->ToCString().get(),
GetBailoutReason(disable_crankshaft_reason_));
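
The LanguageModeScope added above is a small RAII save/restore, so nested blocks and function literals can temporarily switch the visitor's language mode and have it restored on every exit path. A standalone sketch of the same pattern with a toy visitor:

#include <cstdio>

enum LanguageMode { SLOPPY, STRICT };

struct Visitor {
  LanguageMode language_mode = SLOPPY;

  // Same shape as the scope in AstNumberingVisitor: stash the outer mode,
  // install the new one, restore in the destructor (even on early return).
  class LanguageModeScope {
   public:
    LanguageModeScope(Visitor* visitor, LanguageMode mode)
        : visitor_(visitor), outer_(visitor->language_mode) {
      visitor_->language_mode = mode;
    }
    ~LanguageModeScope() { visitor_->language_mode = outer_; }

   private:
    Visitor* visitor_;
    LanguageMode outer_;
  };
};

int main() {
  Visitor v;
  {
    Visitor::LanguageModeScope scope(&v, STRICT);  // e.g. a strict block
    std::printf("inside: %d\n", v.language_mode);  // 1 (STRICT)
  }
  std::printf("outside: %d\n", v.language_mode);   // 0 (SLOPPY), restored
  return 0;
}
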
diff --git a/deps/v8/src/ast/ast-types.cc b/deps/v8/src/ast/ast-types.cc
index 83879215fc..3dde86413a 100644
--- a/deps/v8/src/ast/ast-types.cc
+++ b/deps/v8/src/ast/ast-types.cc
@@ -157,6 +157,8 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case ONE_BYTE_STRING_TYPE:
case CONS_STRING_TYPE:
case CONS_ONE_BYTE_STRING_TYPE:
+ case THIN_STRING_TYPE:
+ case THIN_ONE_BYTE_STRING_TYPE:
case SLICED_STRING_TYPE:
case SLICED_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
@@ -193,8 +195,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
}
case HEAP_NUMBER_TYPE:
return kNumber & kTaggedPointer;
- case SIMD128_VALUE_TYPE:
- return kSimd;
case JS_OBJECT_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
@@ -220,6 +220,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
@@ -308,7 +309,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case ALLOCATION_MEMENTO_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
- case BOX_TYPE:
case DEBUG_INFO_TYPE:
case BREAK_POINT_INFO_TYPE:
case CELL_TYPE:
@@ -1296,13 +1296,6 @@ AstBitsetType::bitset AstBitsetType::UnsignedSmall() {
return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
}
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
- AstType* AstType::Name(Isolate* isolate, Zone* zone) { \
- return Class(i::handle(isolate->heap()->name##_map()), zone); \
- }
-SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
-
// -----------------------------------------------------------------------------
// Instantiations.
diff --git a/deps/v8/src/ast/ast-types.h b/deps/v8/src/ast/ast-types.h
index 0b6e23ffda..ea0be65eb6 100644
--- a/deps/v8/src/ast/ast-types.h
+++ b/deps/v8/src/ast/ast-types.h
@@ -156,15 +156,15 @@ namespace internal {
#define AST_REPRESENTATION(k) ((k) & AstBitsetType::kRepresentation)
#define AST_SEMANTIC(k) ((k) & AstBitsetType::kSemantic)
+// Bits 21-22 are available.
#define AST_REPRESENTATION_BITSET_TYPE_LIST(V) \
V(None, 0) \
- V(UntaggedBit, 1u << 22 | kSemantic) \
- V(UntaggedIntegral8, 1u << 23 | kSemantic) \
- V(UntaggedIntegral16, 1u << 24 | kSemantic) \
- V(UntaggedIntegral32, 1u << 25 | kSemantic) \
- V(UntaggedFloat32, 1u << 26 | kSemantic) \
- V(UntaggedFloat64, 1u << 27 | kSemantic) \
- V(UntaggedSimd128, 1u << 28 | kSemantic) \
+ V(UntaggedBit, 1u << 23 | kSemantic) \
+ V(UntaggedIntegral8, 1u << 24 | kSemantic) \
+ V(UntaggedIntegral16, 1u << 25 | kSemantic) \
+ V(UntaggedIntegral32, 1u << 26 | kSemantic) \
+ V(UntaggedFloat32, 1u << 27 | kSemantic) \
+ V(UntaggedFloat64, 1u << 28 | kSemantic) \
V(UntaggedPointer, 1u << 29 | kSemantic) \
V(TaggedSigned, 1u << 30 | kSemantic) \
V(TaggedPointer, 1u << 31 | kSemantic) \
@@ -197,13 +197,12 @@ namespace internal {
V(Symbol, 1u << 12 | AST_REPRESENTATION(kTaggedPointer)) \
V(InternalizedString, 1u << 13 | AST_REPRESENTATION(kTaggedPointer)) \
V(OtherString, 1u << 14 | AST_REPRESENTATION(kTaggedPointer)) \
- V(Simd, 1u << 15 | AST_REPRESENTATION(kTaggedPointer)) \
- V(OtherObject, 1u << 17 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(OtherObject, 1u << 15 | AST_REPRESENTATION(kTaggedPointer)) \
V(OtherUndetectable, 1u << 16 | AST_REPRESENTATION(kTaggedPointer)) \
- V(Proxy, 1u << 18 | AST_REPRESENTATION(kTaggedPointer)) \
- V(Function, 1u << 19 | AST_REPRESENTATION(kTaggedPointer)) \
- V(Hole, 1u << 20 | AST_REPRESENTATION(kTaggedPointer)) \
- V(OtherInternal, 1u << 21 | \
+ V(Proxy, 1u << 17 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(Function, 1u << 18 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(Hole, 1u << 19 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(OtherInternal, 1u << 20 | \
AST_REPRESENTATION(kTagged | kUntagged)) \
\
V(Signed31, kUnsigned30 | kNegative31) \
@@ -232,11 +231,10 @@ namespace internal {
V(NullOrUndefined, kNull | kUndefined) \
V(Undetectable, kNullOrUndefined | kOtherUndetectable) \
V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | kHole) \
- V(NumberOrSimdOrString, kNumber | kSimd | kString) \
V(NumberOrString, kNumber | kString) \
V(NumberOrUndefined, kNumber | kUndefined) \
V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
- V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
+ V(Primitive, kSymbol | kPlainPrimitive) \
V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
V(Object, kFunction | kOtherObject | kOtherUndetectable) \
V(Receiver, kObject | kProxy) \
@@ -770,11 +768,6 @@ class AstType {
return tuple;
}
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
- static AstType* Name(Isolate* isolate, Zone* zone);
- SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
-
static AstType* Union(AstType* type1, AstType* type2, Zone* zone);
static AstType* Intersect(AstType* type1, AstType* type2, Zone* zone);
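
The renumbering above reads more easily against a toy model of the bitset encoding: every AstType bitset packs semantic bits into the low half of a uint32_t and representation bits into the high half (bits 23-31 after this patch), and AST_REPRESENTATION/AST_SEMANTIC are plain masks. Union, intersection and subtype tests then reduce to bitwise ops, which is why dropping kSimd only requires renumbering the list. The sketch below is a minimal stand-in, not V8's actual constants; the kNumber/kString bit positions are made up.

    // Minimal sketch (not V8's real constants) of a bitset type system that
    // splits one uint32_t into a "semantic" half and a "representation" half.
    #include <cstdint>
    #include <cstdio>

    namespace sketch {
    constexpr uint32_t kSemanticMask = (1u << 23) - 1;        // bits 0-22
    constexpr uint32_t kRepresentationMask = ~kSemanticMask;  // bits 23-31

    constexpr uint32_t kTaggedPointer = 1u << 31;  // a representation bit
    constexpr uint32_t kNumber = 1u << 5;          // hypothetical semantic bit
    constexpr uint32_t kString = 1u << 14;         // hypothetical semantic bit

    constexpr uint32_t Semantic(uint32_t t) { return t & kSemanticMask; }
    constexpr uint32_t Representation(uint32_t t) {
      return t & kRepresentationMask;
    }

    // Unions of types are unions of bits.
    constexpr uint32_t kNumberOrString = kNumber | kString;
    }  // namespace sketch

    int main() {
      uint32_t t = sketch::kNumber | sketch::kTaggedPointer;
      std::printf("semantic=%x representation=%x\n", sketch::Semantic(t),
                  sketch::Representation(t));
      // Subtype test on the semantic half: no bits outside the supertype.
      bool subtype = (sketch::Semantic(t) & ~sketch::kNumberOrString) == 0;
      std::printf("subtype of NumberOrString: %d\n", subtype);
    }
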
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 4add57955f..b160c48a20 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -129,6 +129,36 @@ bool AstRawString::IsOneByteEqualTo(const char* data) const {
return false;
}
+bool AstRawString::Compare(void* a, void* b) {
+ const AstRawString* lhs = static_cast<AstRawString*>(a);
+ const AstRawString* rhs = static_cast<AstRawString*>(b);
+ DCHECK_EQ(lhs->hash(), rhs->hash());
+ if (lhs->length() != rhs->length()) return false;
+ const unsigned char* l = lhs->raw_data();
+ const unsigned char* r = rhs->raw_data();
+ size_t length = rhs->length();
+ if (lhs->is_one_byte()) {
+ if (rhs->is_one_byte()) {
+ return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
+ reinterpret_cast<const uint8_t*>(r),
+ length) == 0;
+ } else {
+ return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
+ reinterpret_cast<const uint16_t*>(r),
+ length) == 0;
+ }
+ } else {
+ if (rhs->is_one_byte()) {
+ return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
+ reinterpret_cast<const uint8_t*>(r),
+ length) == 0;
+ } else {
+ return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
+ reinterpret_cast<const uint16_t*>(r),
+ length) == 0;
+ }
+ }
+}
void AstConsString::Internalize(Isolate* isolate) {
// AstRawStrings are internalized before AstConsStrings so left and right are
@@ -184,14 +214,10 @@ void AstValue::Internalize(Isolate* isolate) {
DCHECK(!string_->string().is_null());
break;
case SYMBOL:
- if (symbol_name_[0] == 'i') {
- DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
- set_value(isolate->factory()->iterator_symbol());
- } else if (strcmp(symbol_name_, "hasInstance_symbol") == 0) {
- set_value(isolate->factory()->has_instance_symbol());
- } else {
- DCHECK_EQ(0, strcmp(symbol_name_, "home_object_symbol"));
- set_value(isolate->factory()->home_object_symbol());
+ switch (symbol_) {
+ case AstSymbol::kHomeObjectSymbol:
+ set_value(isolate->factory()->home_object_symbol());
+ break;
}
break;
case NUMBER_WITH_DOT:
@@ -295,9 +321,8 @@ const AstValue* AstValueFactory::NewString(const AstRawString* string) {
return AddValue(value);
}
-
-const AstValue* AstValueFactory::NewSymbol(const char* name) {
- AstValue* value = new (zone_) AstValue(name);
+const AstValue* AstValueFactory::NewSymbol(AstSymbol symbol) {
+ AstValue* value = new (zone_) AstValue(symbol);
return AddValue(value);
}
@@ -356,7 +381,7 @@ AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
// return this AstRawString.
AstRawString key(is_one_byte, literal_bytes, hash);
base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, hash);
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
// Copy literal contents for later comparison.
int length = literal_bytes.length();
byte* new_literal_bytes = zone_->NewArray<byte>(length);
@@ -371,36 +396,5 @@ AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
return reinterpret_cast<AstRawString*>(entry->key);
}
-
-bool AstValueFactory::AstRawStringCompare(void* a, void* b) {
- const AstRawString* lhs = static_cast<AstRawString*>(a);
- const AstRawString* rhs = static_cast<AstRawString*>(b);
- DCHECK_EQ(lhs->hash(), rhs->hash());
- if (lhs->length() != rhs->length()) return false;
- const unsigned char* l = lhs->raw_data();
- const unsigned char* r = rhs->raw_data();
- size_t length = rhs->length();
- if (lhs->is_one_byte()) {
- if (rhs->is_one_byte()) {
- return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
- reinterpret_cast<const uint8_t*>(r),
- length) == 0;
- } else {
- return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
- reinterpret_cast<const uint16_t*>(r),
- length) == 0;
- }
- } else {
- if (rhs->is_one_byte()) {
- return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
- reinterpret_cast<const uint8_t*>(r),
- length) == 0;
- } else {
- return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
- reinterpret_cast<const uint16_t*>(r),
- length) == 0;
- }
- }
-}
} // namespace internal
} // namespace v8
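
The hunk above moves the string comparator onto AstRawString so that both AstValueFactory and AstStringConstants can hand the same callback to their hash maps. A minimal sketch of that matcher contract follows, assuming nothing about V8's base::CustomMatcherHashMap beyond its bool(void*, void*) callback shape; the table and RawString type here are illustrative stand-ins. Entries are bucketed by a precomputed hash, and the callback breaks collisions.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <unordered_map>

    struct RawString {
      const char* data;
      uint32_t hash;
    };

    using MatchFun = bool (*)(void*, void*);

    // Only called for keys whose hashes already agree, like Compare above.
    bool Compare(void* a, void* b) {
      auto* lhs = static_cast<RawString*>(a);
      auto* rhs = static_cast<RawString*>(b);
      return std::strcmp(lhs->data, rhs->data) == 0;
    }

    // LookupOrInsert: probe every entry in the hash bucket with the matcher.
    RawString* LookupOrInsert(
        std::unordered_multimap<uint32_t, RawString*>* table, RawString* key,
        MatchFun match) {
      auto range = table->equal_range(key->hash);
      for (auto it = range.first; it != range.second; ++it) {
        if (match(it->second, key)) return it->second;  // existing entry
      }
      table->emplace(key->hash, key);
      return key;  // newly inserted
    }

    int main() {
      std::unordered_multimap<uint32_t, RawString*> table;
      RawString a{"foo", 7}, b{"foo", 7}, c{"bar", 7};  // c collides on hash
      std::printf("%d\n", LookupOrInsert(&table, &a, Compare) == &a);  // new
      std::printf("%d\n", LookupOrInsert(&table, &b, Compare) == &a);  // match
      std::printf("%d\n", LookupOrInsert(&table, &c, Compare) == &c);  // new
    }
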
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index fd9ed71167..c1ed7acc57 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -28,10 +28,11 @@
#ifndef V8_AST_AST_VALUE_FACTORY_H_
#define V8_AST_AST_VALUE_FACTORY_H_
-#include "src/api.h"
#include "src/base/hashmap.h"
#include "src/conversions.h"
+#include "src/factory.h"
#include "src/globals.h"
+#include "src/isolate.h"
#include "src/utils.h"
// AstString, AstValue and AstValueFactory are for storing strings and values
@@ -105,6 +106,8 @@ class AstRawString final : public AstString {
return *c;
}
+ static bool Compare(void* a, void* b);
+
// For storing AstRawStrings in a hash map.
uint32_t hash() const {
return hash_;
@@ -151,15 +154,18 @@ class AstConsString final : public AstString {
const AstString* right_;
};
+enum class AstSymbol : uint8_t { kHomeObjectSymbol };
-// AstValue is either a string, a number, a string array, a boolean, or a
-// special value (null, undefined, the hole).
+// AstValue is either a string, a symbol, a number, a string array, a boolean,
+// or a special value (null, undefined, the hole).
class AstValue : public ZoneObject {
public:
bool IsString() const {
return type_ == STRING;
}
+ bool IsSymbol() const { return type_ == SYMBOL; }
+
bool IsNumber() const { return IsSmi() || IsHeapNumber(); }
bool ContainsDot() const {
@@ -171,6 +177,11 @@ class AstValue : public ZoneObject {
return string_;
}
+ AstSymbol AsSymbol() const {
+ CHECK_EQ(SYMBOL, type_);
+ return symbol_;
+ }
+
double AsNumber() const {
if (IsHeapNumber()) return number_;
if (IsSmi()) return smi_;
@@ -248,8 +259,8 @@ class AstValue : public ZoneObject {
string_ = s;
}
- explicit AstValue(const char* name) : type_(SYMBOL), next_(nullptr) {
- symbol_name_ = name;
+ explicit AstValue(AstSymbol symbol) : type_(SYMBOL), next_(nullptr) {
+ symbol_ = symbol;
}
explicit AstValue(double n, bool with_dot) : next_(nullptr) {
@@ -289,7 +300,7 @@ class AstValue : public ZoneObject {
double number_;
int smi_;
bool bool_;
- const char* symbol_name_;
+ AstSymbol symbol_;
};
};
@@ -335,7 +346,9 @@ class AstValue : public ZoneObject {
class AstStringConstants final {
public:
AstStringConstants(Isolate* isolate, uint32_t hash_seed)
- : zone_(isolate->allocator(), ZONE_NAME), hash_seed_(hash_seed) {
+ : zone_(isolate->allocator(), ZONE_NAME),
+ string_table_(AstRawString::Compare),
+ hash_seed_(hash_seed) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
#define F(name, str) \
{ \
@@ -348,20 +361,28 @@ class AstStringConstants final {
/* The Handle returned by the factory is located on the roots */ \
/* array, not on the temporary HandleScope, so this is safe. */ \
name##_string_->set_string(isolate->factory()->name##_string()); \
+ base::HashMap::Entry* entry = \
+ string_table_.InsertNew(name##_string_, name##_string_->hash()); \
+ DCHECK(entry->value == nullptr); \
+ entry->value = reinterpret_cast<void*>(1); \
}
STRING_CONSTANTS(F)
#undef F
}
#define F(name, str) \
- AstRawString* name##_string() { return name##_string_; }
+ const AstRawString* name##_string() const { return name##_string_; }
STRING_CONSTANTS(F)
#undef F
uint32_t hash_seed() const { return hash_seed_; }
+ const base::CustomMatcherHashMap* string_table() const {
+ return &string_table_;
+ }
private:
Zone zone_;
+ base::CustomMatcherHashMap string_table_;
uint32_t hash_seed_;
#define F(name, str) AstRawString* name##_string_;
@@ -380,9 +401,9 @@ class AstStringConstants final {
class AstValueFactory {
public:
- AstValueFactory(Zone* zone, AstStringConstants* string_constants,
+ AstValueFactory(Zone* zone, const AstStringConstants* string_constants,
uint32_t hash_seed)
- : string_table_(AstRawStringCompare),
+ : string_table_(string_constants->string_table()),
values_(nullptr),
strings_(nullptr),
strings_end_(&strings_),
@@ -397,7 +418,6 @@ class AstValueFactory {
std::fill(one_character_strings_,
one_character_strings_ + arraysize(one_character_strings_),
nullptr);
- InitializeStringConstants();
}
Zone* zone() const { return zone_; }
@@ -416,7 +436,7 @@ class AstValueFactory {
const AstConsString* NewConsString(const AstString* left,
const AstString* right);
- void Internalize(Isolate* isolate);
+ V8_EXPORT_PRIVATE void Internalize(Isolate* isolate);
#define F(name, str) \
const AstRawString* name##_string() { \
@@ -425,10 +445,11 @@ class AstValueFactory {
STRING_CONSTANTS(F)
#undef F
- const AstValue* NewString(const AstRawString* string);
+ V8_EXPORT_PRIVATE const AstValue* NewString(const AstRawString* string);
// A JavaScript symbol (ECMA-262 edition 6).
- const AstValue* NewSymbol(const char* name);
- const AstValue* NewNumber(double number, bool with_dot = false);
+ const AstValue* NewSymbol(AstSymbol symbol);
+ V8_EXPORT_PRIVATE const AstValue* NewNumber(double number,
+ bool with_dot = false);
const AstValue* NewSmi(uint32_t number);
const AstValue* NewBoolean(bool b);
const AstValue* NewStringList(ZoneList<const AstRawString*>* strings);
@@ -461,19 +482,6 @@ class AstValueFactory {
AstRawString* GetString(uint32_t hash, bool is_one_byte,
Vector<const byte> literal_bytes);
- void InitializeStringConstants() {
-#define F(name, str) \
- AstRawString* raw_string_##name = string_constants_->name##_string(); \
- base::HashMap::Entry* entry_##name = string_table_.LookupOrInsert( \
- raw_string_##name, raw_string_##name->hash()); \
- DCHECK(entry_##name->value == nullptr); \
- entry_##name->value = reinterpret_cast<void*>(1);
- STRING_CONSTANTS(F)
-#undef F
- }
-
- static bool AstRawStringCompare(void* a, void* b);
-
  // All strings are copied here, one after another (no NULLs in between).
base::CustomMatcherHashMap string_table_;
// For keeping track of all AstValues and AstRawStrings we've created (so that
@@ -486,7 +494,7 @@ class AstValueFactory {
AstString** strings_end_;
// Holds constant string values which are shared across the isolate.
- AstStringConstants* string_constants_;
+ const AstStringConstants* string_constants_;
// Caches for faster access: small numbers, one character lowercase strings
// (for minified code).
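
The switch from const char* symbol names to an AstSymbol enum trades runtime string comparison for compiler-checked dispatch. A sketch under stand-in types (Factory and Symbol here are illustrative, not V8's classes):

    #include <cassert>
    #include <cstdio>

    enum class AstSymbol : uint8_t { kHomeObjectSymbol };

    struct Symbol { const char* debug_name; };

    struct Factory {
      Symbol home_object_symbol_{"home_object_symbol"};
      Symbol* home_object_symbol() { return &home_object_symbol_; }
    };

    Symbol* Internalize(Factory* factory, AstSymbol symbol) {
      switch (symbol) {
        case AstSymbol::kHomeObjectSymbol:
          return factory->home_object_symbol();
      }
      // An unhandled enumerator is a compiler warning (-Wswitch), which is
      // the safety the old strcmp chain could not provide.
      assert(false);
      return nullptr;
    }

    int main() {
      Factory factory;
      std::printf("%s\n",
                  Internalize(&factory, AstSymbol::kHomeObjectSymbol)
                      ->debug_name);
    }
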
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index c63f90ecf1..5705c70057 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -15,7 +15,10 @@
#include "src/code-stubs.h"
#include "src/contexts.h"
#include "src/conversions.h"
+#include "src/double.h"
#include "src/elements.h"
+#include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
#include "src/property-details.h"
#include "src/property.h"
#include "src/string-stream.h"
@@ -29,6 +32,22 @@ namespace internal {
#ifdef DEBUG
+static const char* NameForNativeContextIntrinsicIndex(uint32_t idx) {
+ switch (idx) {
+#define NATIVE_CONTEXT_FIELDS_IDX(NAME, Type, name) \
+ case Context::NAME: \
+ return #name;
+
+ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELDS_IDX)
+#undef NATIVE_CONTEXT_FIELDS_IDX
+
+ default:
+ break;
+ }
+
+ return "UnknownIntrinsicIndex";
+}
+
void AstNode::Print() { Print(Isolate::Current()); }
void AstNode::Print(Isolate* isolate) {
@@ -202,47 +221,51 @@ void VariableProxy::BindTo(Variable* var) {
set_var(var);
set_is_resolved();
var->set_is_used();
+ if (is_assigned()) var->set_maybe_assigned();
}
-void VariableProxy::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
+void VariableProxy::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ TypeofMode typeof_mode,
+ FeedbackSlotCache* cache) {
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
// make their loads from the same IC slot.
if (var()->IsUnallocated() || var()->mode() == DYNAMIC_GLOBAL) {
- ZoneHashMap::Entry* entry = cache->Get(var());
- if (entry != NULL) {
- variable_feedback_slot_ = FeedbackVectorSlot(
- static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
+ FeedbackSlot slot = cache->Get(typeof_mode, var());
+ if (!slot.IsInvalid()) {
+ variable_feedback_slot_ = slot;
return;
}
- variable_feedback_slot_ = spec->AddLoadGlobalICSlot();
- cache->Put(var(), variable_feedback_slot_);
+ variable_feedback_slot_ = spec->AddLoadGlobalICSlot(typeof_mode);
+ cache->Put(typeof_mode, var(), variable_feedback_slot_);
} else {
variable_feedback_slot_ = spec->AddLoadICSlot();
}
}
}
-
static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
- FeedbackVectorSlot* out_slot) {
+ LanguageMode language_mode,
+ FeedbackSlot* out_slot) {
Property* property = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
if ((assign_type == VARIABLE &&
expr->AsVariableProxy()->var()->IsUnallocated()) ||
assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
// TODO(ishell): consider using ICSlotCache for variables here.
- FeedbackVectorSlotKind kind = assign_type == KEYED_PROPERTY
- ? FeedbackVectorSlotKind::KEYED_STORE_IC
- : FeedbackVectorSlotKind::STORE_IC;
- *out_slot = spec->AddSlot(kind);
+ if (assign_type == KEYED_PROPERTY) {
+ *out_slot = spec->AddKeyedStoreICSlot(language_mode);
+
+ } else {
+ *out_slot = spec->AddStoreICSlot(language_mode);
+ }
}
}
-void ForInStatement::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
- AssignVectorSlots(each(), spec, &each_slot_);
+void ForInStatement::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ AssignVectorSlots(each(), spec, language_mode, &each_slot_);
for_in_feedback_slot_ = spec->AddGeneralSlot();
}
@@ -257,14 +280,16 @@ Assignment::Assignment(Token::Value op, Expression* target, Expression* value,
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
}
-void Assignment::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
- AssignVectorSlots(target(), spec, &slot_);
+void Assignment::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ AssignVectorSlots(target(), spec, language_mode, &slot_);
}
-void CountOperation::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
- AssignVectorSlots(expression(), spec, &slot_);
+void CountOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ AssignVectorSlots(expression(), spec, language_mode, &slot_);
// Assign a slot to collect feedback about binary operations. Used only in
  // Ignition. Full-codegen uses AstId to record type feedback.
binary_operation_slot_ = spec->AddInterpreterBinaryOpICSlot();
@@ -347,12 +372,12 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
}
}
-FeedbackVectorSlot LiteralProperty::GetStoreDataPropertySlot() const {
+FeedbackSlot LiteralProperty::GetStoreDataPropertySlot() const {
int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
return GetSlot(offset);
}
-void LiteralProperty::SetStoreDataPropertySlot(FeedbackVectorSlot slot) {
+void LiteralProperty::SetStoreDataPropertySlot(FeedbackSlot slot) {
int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
return SetSlot(slot, offset);
}
@@ -371,23 +396,24 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
kind_(kind),
is_static_(is_static) {}
-void ClassLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
+void ClassLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
// This logic that computes the number of slots needed for vector store
// ICs must mirror BytecodeGenerator::VisitClassLiteral.
if (FunctionLiteral::NeedsHomeObject(constructor())) {
- home_object_slot_ = spec->AddStoreICSlot();
+ home_object_slot_ = spec->AddStoreICSlot(language_mode);
}
if (NeedsProxySlot()) {
- proxy_slot_ = spec->AddStoreICSlot();
+ proxy_slot_ = spec->AddStoreICSlot(language_mode);
}
for (int i = 0; i < properties()->length(); i++) {
ClassLiteral::Property* property = properties()->at(i);
Expression* value = property->value();
if (FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot(language_mode));
}
property->SetStoreDataPropertySlot(
spec->AddStoreDataPropertyInLiteralICSlot());
@@ -407,8 +433,11 @@ void ObjectLiteral::Property::set_emit_store(bool emit_store) {
bool ObjectLiteral::Property::emit_store() const { return emit_store_; }
-void ObjectLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
+void ObjectLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, cache);
+
// This logic that computes the number of slots needed for vector store
  // ICs must mirror FullCodeGenerator::VisitObjectLiteral.
int property_index = 0;
@@ -430,27 +459,27 @@ void ObjectLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
// contains computed properties with an uninitialized value.
if (key->IsStringLiteral()) {
if (property->emit_store()) {
- property->SetSlot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreOwnICSlot());
if (FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot(), 1);
+ property->SetSlot(spec->AddStoreICSlot(language_mode), 1);
}
}
break;
}
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot(language_mode));
}
break;
case ObjectLiteral::Property::PROTOTYPE:
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot(language_mode));
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot(language_mode));
}
break;
}
@@ -462,7 +491,7 @@ void ObjectLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
Expression* value = property->value();
if (property->kind() != ObjectLiteral::Property::PROTOTYPE) {
if (FunctionLiteral::NeedsHomeObject(value)) {
- property->SetSlot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot(language_mode));
}
}
property->SetStoreDataPropertySlot(
@@ -582,9 +611,31 @@ void ObjectLiteral::InitDepthAndFlags() {
void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
if (!constant_properties_.is_null()) return;
- // Allocate a fixed array to hold all the constant properties.
- Handle<FixedArray> constant_properties =
- isolate->factory()->NewFixedArray(boilerplate_properties_ * 2, TENURED);
+ int index_keys = 0;
+ bool has_seen_proto = false;
+ for (int i = 0; i < properties()->length(); i++) {
+ ObjectLiteral::Property* property = properties()->at(i);
+ if (!IsBoilerplateProperty(property)) {
+ has_seen_proto = true;
+ continue;
+ }
+ if (property->is_computed_name()) {
+ continue;
+ }
+
+ Handle<Object> key = property->key()->AsLiteral()->value();
+
+ uint32_t element_index = 0;
+ if (key->ToArrayIndex(&element_index) ||
+ (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index))) {
+ index_keys++;
+ }
+ }
+
+ Handle<BoilerplateDescription> constant_properties =
+ isolate->factory()->NewBoilerplateDescription(boilerplate_properties_,
+ properties()->length(),
+ index_keys, has_seen_proto);
int position = 0;
for (int i = 0; i < properties()->length(); i++) {
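
The new counting loop above classifies each boilerplate key as either a named property or an element index. A hedged sketch of that test for string keys, roughly mirroring the contract of String::AsArrayIndex; the canonical-spelling and bound rules below are the usual JS array-index rules, not a copy of V8's implementation:

    #include <cstdint>
    #include <cstdio>

    // A key is an array index iff it is the canonical decimal spelling of an
    // integer in [0, 2^32 - 2].
    bool AsArrayIndex(const char* key, uint32_t* out) {
      if (key[0] == '\0') return false;
      if (key[0] == '0' && key[1] != '\0') return false;  // no leading zeros
      uint64_t value = 0;
      for (const char* p = key; *p; ++p) {
        if (*p < '0' || *p > '9') return false;
        value = value * 10 + static_cast<uint64_t>(*p - '0');
        if (value > 0xFFFFFFFEull) return false;  // max index = 2^32 - 2
      }
      *out = static_cast<uint32_t>(value);
      return true;
    }

    int main() {
      const char* keys[] = {"0", "42", "007", "4294967294", "4294967295", "x"};
      for (const char* key : keys) {
        uint32_t index;
        std::printf("%-10s -> %s\n", key,
                    AsArrayIndex(key, &index) ? "index key" : "named key");
      }
    }
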
@@ -634,6 +685,10 @@ bool ObjectLiteral::IsFastCloningSupported() const {
kMaximumClonedShallowObjectProperties;
}
+ElementsKind ArrayLiteral::constant_elements_kind() const {
+ return static_cast<ElementsKind>(constant_elements()->elements_kind());
+}
+
void ArrayLiteral::InitDepthAndFlags() {
DCHECK_LT(first_spread_index_, 0);
@@ -734,8 +789,16 @@ bool ArrayLiteral::IsFastCloningSupported() const {
ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements;
}
-void ArrayLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
+void ArrayLiteral::RewindSpreads() {
+ values_->Rewind(first_spread_index_);
+ first_spread_index_ = -1;
+}
+
+void ArrayLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, cache);
+
// This logic that computes the number of slots needed for vector store
  // ICs must mirror FullCodeGenerator::VisitArrayLiteral.
for (int array_index = 0; array_index < values()->length(); array_index++) {
@@ -745,7 +808,7 @@ void ArrayLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
// We'll reuse the same literal slot for all of the non-constant
// subexpressions that use a keyed store IC.
- literal_slot_ = spec->AddKeyedStoreICSlot();
+ literal_slot_ = spec->AddKeyedStoreICSlot(language_mode);
return;
}
}
@@ -803,8 +866,9 @@ void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
set_to_boolean_types(oracle->ToBooleanTypes(right()->test_id()));
}
-void BinaryOperation::AssignFeedbackVectorSlots(
- FeedbackVectorSpec* spec, FeedbackVectorSlotCache* cache) {
+void BinaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
  // Feedback vector slot is only used by the interpreter for binary
  // operations.
// Full-codegen uses AstId to record type feedback.
switch (op()) {
@@ -814,7 +878,7 @@ void BinaryOperation::AssignFeedbackVectorSlots(
case Token::OR:
return;
default:
- type_feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
+ feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
return;
}
}
@@ -824,8 +888,9 @@ static bool IsTypeof(Expression* expr) {
return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
}
-void CompareOperation::AssignFeedbackVectorSlots(
- FeedbackVectorSpec* spec, FeedbackVectorSlotCache* cache_) {
+void CompareOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FeedbackSlotCache* cache_) {
  // Feedback vector slot is only used by the interpreter for compare
  // operations.
// Full-codegen uses AstId to record type feedback.
switch (op()) {
@@ -834,7 +899,7 @@ void CompareOperation::AssignFeedbackVectorSlots(
case Token::IN:
return;
default:
- type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
+ feedback_slot_ = spec->AddInterpreterCompareICSlot();
}
}
@@ -983,8 +1048,9 @@ bool Expression::IsMonomorphic() const {
}
}
-void Call::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
+void Call::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
ic_slot_ = spec->AddCallICSlot();
}
@@ -1022,9 +1088,10 @@ CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
statements_(statements),
compare_type_(AstType::None()) {}
-void CaseClause::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
- type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
+void CaseClause::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+ LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ feedback_slot_ = spec->AddInterpreterCompareICSlot();
}
uint32_t Literal::Hash() {
@@ -1042,5 +1109,14 @@ bool Literal::Match(void* literal1, void* literal2) {
(x->IsNumber() && y->IsNumber() && x->AsNumber() == y->AsNumber());
}
+const char* CallRuntime::debug_name() {
+#ifdef DEBUG
+ return is_jsruntime() ? NameForNativeContextIntrinsicIndex(context_index_)
+ : function_->name;
+#else
+ return is_jsruntime() ? "(context function)" : function_->name;
+#endif // DEBUG
+}
+
} // namespace internal
} // namespace v8
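
NameForNativeContextIntrinsicIndex above is a classic X-macro: the same NATIVE_CONTEXT_FIELDS(V) list that defines the context-field indices also stamps out the debug-name switch, so the two can never drift apart. A self-contained sketch of the idiom with a made-up field list (the real macro also carries a Type parameter):

    #include <cstdio>

    #define FIELDS(V)                  \
      V(ARRAY_FUNCTION_INDEX, array)   \
      V(OBJECT_FUNCTION_INDEX, object) \
      V(PROMISE_FUNCTION_INDEX, promise)

    // Expansion one: the enum of indices.
    enum ContextField {
    #define DECLARE(NAME, name) NAME,
      FIELDS(DECLARE)
    #undef DECLARE
    };

    // Expansion two: the index-to-name switch.
    const char* NameForIndex(unsigned idx) {
      switch (idx) {
    #define CASE(NAME, name) \
      case NAME:             \
        return #name;
        FIELDS(CASE)
    #undef CASE
        default:
          return "UnknownIntrinsicIndex";
      }
    }

    int main() {
      std::printf("%s %s %s\n", NameForIndex(ARRAY_FUNCTION_INDEX),
                  NameForIndex(PROMISE_FUNCTION_INDEX), NameForIndex(99));
    }
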
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 0cf20b0e4c..90e94bb87e 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -5,7 +5,6 @@
#ifndef V8_AST_AST_H_
#define V8_AST_AST_H_
-#include "src/assembler.h"
#include "src/ast/ast-types.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/modules.h"
@@ -15,11 +14,12 @@
#include "src/factory.h"
#include "src/globals.h"
#include "src/isolate.h"
+#include "src/label.h"
#include "src/list.h"
+#include "src/objects/literal-objects.h"
#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
#include "src/small-pointer-list.h"
-#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -127,27 +127,29 @@ class TypeFeedbackOracle;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
-
-class FeedbackVectorSlotCache {
+class FeedbackSlotCache {
public:
- explicit FeedbackVectorSlotCache(Zone* zone)
- : zone_(zone),
- hash_map_(ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)) {}
+ typedef std::pair<TypeofMode, Variable*> Key;
+
+ explicit FeedbackSlotCache(Zone* zone) : map_(zone) {}
- void Put(Variable* variable, FeedbackVectorSlot slot) {
- ZoneHashMap::Entry* entry = hash_map_.LookupOrInsert(
- variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone_));
- entry->value = reinterpret_cast<void*>(slot.ToInt());
+ void Put(TypeofMode typeof_mode, Variable* variable, FeedbackSlot slot) {
+ Key key = std::make_pair(typeof_mode, variable);
+ auto entry = std::make_pair(key, slot);
+ map_.insert(entry);
}
- ZoneHashMap::Entry* Get(Variable* variable) const {
- return hash_map_.Lookup(variable, ComputePointerHash(variable));
+ FeedbackSlot Get(TypeofMode typeof_mode, Variable* variable) const {
+ Key key = std::make_pair(typeof_mode, variable);
+ auto iter = map_.find(key);
+ if (iter != map_.end()) {
+ return iter->second;
+ }
+ return FeedbackSlot();
}
private:
- Zone* zone_;
- ZoneHashMap hash_map_;
+ ZoneMap<Key, FeedbackSlot> map_;
};
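
The rewritten FeedbackSlotCache above keys on a (TypeofMode, Variable*) pair, so loads of the same unallocated variable inside and outside typeof get distinct global-load slots. A compact sketch of the same shape, with std::map standing in for ZoneMap and a trivial int-backed FeedbackSlot:

    #include <cstdio>
    #include <map>
    #include <utility>

    enum class TypeofMode { kInside, kNotInside };
    struct Variable {};  // identity only; keyed by address

    struct FeedbackSlot {
      int id = -1;
      bool IsInvalid() const { return id < 0; }
    };

    class FeedbackSlotCache {
     public:
      using Key = std::pair<TypeofMode, Variable*>;

      void Put(TypeofMode mode, Variable* var, FeedbackSlot slot) {
        map_.insert(std::make_pair(std::make_pair(mode, var), slot));
      }

      // Returns the default (invalid) slot on a cache miss.
      FeedbackSlot Get(TypeofMode mode, Variable* var) const {
        auto it = map_.find(std::make_pair(mode, var));
        return it != map_.end() ? it->second : FeedbackSlot();
      }

     private:
      std::map<Key, FeedbackSlot> map_;
    };

    int main() {
      FeedbackSlotCache cache;
      Variable v;
      cache.Put(TypeofMode::kInside, &v, FeedbackSlot{0});
      std::printf("%d\n", cache.Get(TypeofMode::kInside, &v).id);  // 0
      std::printf("%d\n",
                  cache.Get(TypeofMode::kNotInside, &v).IsInvalid());  // 1
    }
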
@@ -732,10 +734,10 @@ class ForInStatement final : public ForEachStatement {
void set_subject(Expression* e) { subject_ = e; }
// Type feedback information.
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
- FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
- FeedbackVectorSlot ForInFeedbackSlot() {
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache);
+ FeedbackSlot EachFeedbackSlot() const { return each_slot_; }
+ FeedbackSlot ForInFeedbackSlot() {
DCHECK(!for_in_feedback_slot_.IsInvalid());
return for_in_feedback_slot_;
}
@@ -771,8 +773,8 @@ class ForInStatement final : public ForEachStatement {
Expression* each_;
Expression* subject_;
- FeedbackVectorSlot each_slot_;
- FeedbackVectorSlot for_in_feedback_slot_;
+ FeedbackSlot each_slot_;
+ FeedbackSlot for_in_feedback_slot_;
class ForInTypeField
: public BitField<ForInType, ForEachStatement::kNextBitFieldIndex, 1> {};
@@ -962,12 +964,10 @@ class CaseClause final : public Expression {
// CaseClause will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used by
  // full codegen and the feedback vector slot is used by the interpreter.
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache);
- FeedbackVectorSlot CompareOperationFeedbackSlot() {
- return type_feedback_slot_;
- }
+ FeedbackSlot CompareOperationFeedbackSlot() { return feedback_slot_; }
private:
friend class AstNodeFactory;
@@ -980,7 +980,7 @@ class CaseClause final : public Expression {
Label body_target_;
ZoneList<Statement*>* statements_;
AstType* compare_type_;
- FeedbackVectorSlot type_feedback_slot_;
+ FeedbackSlot feedback_slot_;
};
@@ -1155,26 +1155,10 @@ class TryFinallyStatement final : public TryStatement {
class DebuggerStatement final : public Statement {
- public:
- void set_base_id(int id) { base_id_ = id; }
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId DebugBreakId() const { return BailoutId(local_id(0)); }
-
private:
friend class AstNodeFactory;
- explicit DebuggerStatement(int pos)
- : Statement(pos, kDebuggerStatement),
- base_id_(BailoutId::None().ToInt()) {}
-
- static int parent_num_ids() { return 0; }
- int base_id() const {
- DCHECK(!BailoutId(base_id_).IsNone());
- return base_id_;
- }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
- int base_id_;
+ explicit DebuggerStatement(int pos) : Statement(pos, kDebuggerStatement) {}
};
@@ -1249,32 +1233,32 @@ class Literal final : public Expression {
const AstValue* value_;
};
-
-class AstLiteralReindexer;
-
-// Base class for literals that needs space in the corresponding JSFunction.
+// Base class for literals that need space in the type feedback vector.
class MaterializedLiteral : public Expression {
public:
- int literal_index() { return literal_index_; }
-
int depth() const {
    // Only callable after initialization.
DCHECK(depth_ >= 1);
return depth_;
}
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ literal_slot_ = spec->AddLiteralSlot();
+ }
+
+ FeedbackSlot literal_slot() const { return literal_slot_; }
+
private:
int depth_ : 31;
- int literal_index_;
-
- friend class AstLiteralReindexer;
+ FeedbackSlot literal_slot_;
class IsSimpleField
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
protected:
- MaterializedLiteral(int literal_index, int pos, NodeType type)
- : Expression(pos, type), depth_(0), literal_index_(literal_index) {
+ MaterializedLiteral(int pos, NodeType type)
+ : Expression(pos, type), depth_(0) {
bit_field_ |= IsSimpleField::encode(false);
}
@@ -1319,19 +1303,19 @@ class LiteralProperty : public ZoneObject {
bool is_computed_name() const { return is_computed_name_; }
- FeedbackVectorSlot GetSlot(int offset = 0) const {
+ FeedbackSlot GetSlot(int offset = 0) const {
DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
return slots_[offset];
}
- FeedbackVectorSlot GetStoreDataPropertySlot() const;
+ FeedbackSlot GetStoreDataPropertySlot() const;
- void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
+ void SetSlot(FeedbackSlot slot, int offset = 0) {
DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
slots_[offset] = slot;
}
- void SetStoreDataPropertySlot(FeedbackVectorSlot slot);
+ void SetStoreDataPropertySlot(FeedbackSlot slot);
bool NeedsSetFunctionName() const;
@@ -1341,7 +1325,7 @@ class LiteralProperty : public ZoneObject {
Expression* key_;
Expression* value_;
- FeedbackVectorSlot slots_[2];
+ FeedbackSlot slots_[2];
bool is_computed_name_;
};
@@ -1393,7 +1377,7 @@ class ObjectLiteral final : public MaterializedLiteral {
public:
typedef ObjectLiteralProperty Property;
- Handle<FixedArray> constant_properties() const {
+ Handle<BoilerplateDescription> constant_properties() const {
DCHECK(!constant_properties_.is_null());
return constant_properties_;
}
@@ -1407,6 +1391,9 @@ class ObjectLiteral final : public MaterializedLiteral {
bool has_shallow_properties() const {
return depth() == 1 && !has_elements() && !may_store_doubles();
}
+ bool has_rest_property() const {
+ return HasRestPropertyField::decode(bit_field_);
+ }
// Decide if a property should be in the object boilerplate.
static bool IsBoilerplateProperty(Property* property);
@@ -1415,7 +1402,8 @@ class ObjectLiteral final : public MaterializedLiteral {
void InitDepthAndFlags();
// Get the constant properties fixed array, populating it if necessary.
- Handle<FixedArray> GetOrBuildConstantProperties(Isolate* isolate) {
+ Handle<BoilerplateDescription> GetOrBuildConstantProperties(
+ Isolate* isolate) {
if (constant_properties_.is_null()) {
BuildConstantProperties(isolate);
}
@@ -1449,7 +1437,8 @@ class ObjectLiteral final : public MaterializedLiteral {
kNoFlags = 0,
kFastElements = 1,
kShallowProperties = 1 << 1,
- kDisableMementos = 1 << 2
+ kDisableMementos = 1 << 2,
+ kHasRestProperty = 1 << 3,
};
struct Accessors: public ZoneObject {
@@ -1470,27 +1459,29 @@ class ObjectLiteral final : public MaterializedLiteral {
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache);
private:
friend class AstNodeFactory;
- ObjectLiteral(ZoneList<Property*>* properties, int literal_index,
- uint32_t boilerplate_properties, int pos)
- : MaterializedLiteral(literal_index, pos, kObjectLiteral),
+ ObjectLiteral(ZoneList<Property*>* properties,
+ uint32_t boilerplate_properties, int pos,
+ bool has_rest_property)
+ : MaterializedLiteral(pos, kObjectLiteral),
boilerplate_properties_(boilerplate_properties),
properties_(properties) {
bit_field_ |= FastElementsField::encode(false) |
HasElementsField::encode(false) |
- MayStoreDoublesField::encode(false);
+ MayStoreDoublesField::encode(false) |
+ HasRestPropertyField::encode(has_rest_property);
}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
uint32_t boilerplate_properties_;
- Handle<FixedArray> constant_properties_;
+ Handle<BoilerplateDescription> constant_properties_;
ZoneList<Property*>* properties_;
class FastElementsField
@@ -1499,6 +1490,8 @@ class ObjectLiteral final : public MaterializedLiteral {
};
class MayStoreDoublesField
: public BitField<bool, HasElementsField::kNext, 1> {};
+ class HasRestPropertyField
+ : public BitField<bool, MayStoreDoublesField::kNext, 1> {};
};
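
HasRestPropertyField above is appended with the chained-BitField idiom: each field starts at the previous field's kNext, so boolean flags pack into bit_field_ without hand-maintained shift constants. Below is a stand-in BitField template of the same shape, illustrative rather than V8's actual one:

    #include <cstdint>
    #include <cstdio>

    template <class T, int kShift, int kSize>
    struct BitField {
      static constexpr int kNext = kShift + kSize;
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> kShift);
      }
      static uint32_t update(uint32_t bits, T value) {
        return (bits & ~kMask) | encode(value);
      }
    };

    // Each field picks up where the previous one ends.
    using FastElementsField = BitField<bool, 0, 1>;
    using HasElementsField = BitField<bool, FastElementsField::kNext, 1>;
    using MayStoreDoublesField = BitField<bool, HasElementsField::kNext, 1>;
    using HasRestPropertyField = BitField<bool, MayStoreDoublesField::kNext, 1>;

    int main() {
      uint32_t bits = FastElementsField::encode(false) |
                      HasRestPropertyField::encode(true);
      std::printf("has_rest=%d may_store_doubles=%d\n",
                  HasRestPropertyField::decode(bits),
                  MayStoreDoublesField::decode(bits));
    }
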
@@ -1529,14 +1522,14 @@ class AccessorTable
class RegExpLiteral final : public MaterializedLiteral {
public:
Handle<String> pattern() const { return pattern_->string(); }
+ const AstRawString* raw_pattern() const { return pattern_; }
int flags() const { return flags_; }
private:
friend class AstNodeFactory;
- RegExpLiteral(const AstRawString* pattern, int flags, int literal_index,
- int pos)
- : MaterializedLiteral(literal_index, pos, kRegExpLiteral),
+ RegExpLiteral(const AstRawString* pattern, int flags, int pos)
+ : MaterializedLiteral(pos, kRegExpLiteral),
flags_(flags),
pattern_(pattern) {
set_depth(1);
@@ -1554,9 +1547,7 @@ class ArrayLiteral final : public MaterializedLiteral {
Handle<ConstantElementsPair> constant_elements() const {
return constant_elements_;
}
- ElementsKind constant_elements_kind() const {
- return static_cast<ElementsKind>(constant_elements()->elements_kind());
- }
+ ElementsKind constant_elements_kind() const;
ZoneList<Expression*>* values() const { return values_; }
@@ -1603,10 +1594,7 @@ class ArrayLiteral final : public MaterializedLiteral {
ZoneList<Expression*>::iterator EndValue() const { return values_->end(); }
// Rewind an array literal omitting everything from the first spread on.
- void RewindSpreads() {
- values_->Rewind(first_spread_index_);
- first_spread_index_ = -1;
- }
+ void RewindSpreads();
enum Flags {
kNoFlags = 0,
@@ -1614,16 +1602,15 @@ class ArrayLiteral final : public MaterializedLiteral {
kDisableMementos = 1 << 1
};
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
- FeedbackVectorSlot LiteralFeedbackSlot() const { return literal_slot_; }
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache);
+ FeedbackSlot LiteralFeedbackSlot() const { return literal_slot_; }
private:
friend class AstNodeFactory;
- ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index,
- int literal_index, int pos)
- : MaterializedLiteral(literal_index, pos, kArrayLiteral),
+ ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index, int pos)
+ : MaterializedLiteral(pos, kArrayLiteral),
first_spread_index_(first_spread_index),
values_(values) {}
@@ -1631,7 +1618,7 @@ class ArrayLiteral final : public MaterializedLiteral {
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
int first_spread_index_;
- FeedbackVectorSlot literal_slot_;
+ FeedbackSlot literal_slot_;
Handle<ConstantElementsPair> constant_elements_;
ZoneList<Expression*>* values_;
};
@@ -1693,10 +1680,10 @@ class VariableProxy final : public Expression {
return var()->IsUnallocated() || var()->IsLookupSlot();
}
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, TypeofMode typeof_mode,
+ FeedbackSlotCache* cache);
- FeedbackVectorSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
+ FeedbackSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId BeforeId() const { return BailoutId(local_id(0)); }
@@ -1722,7 +1709,7 @@ class VariableProxy final : public Expression {
class HoleCheckModeField
: public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
- FeedbackVectorSlot variable_feedback_slot_;
+ FeedbackSlot variable_feedback_slot_;
union {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
@@ -1789,17 +1776,16 @@ class Property final : public Expression {
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
- FeedbackVectorSlotKind kind = key()->IsPropertyName()
- ? FeedbackVectorSlotKind::LOAD_IC
- : FeedbackVectorSlotKind::KEYED_LOAD_IC;
- property_feedback_slot_ = spec->AddSlot(kind);
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ if (key()->IsPropertyName()) {
+ property_feedback_slot_ = spec->AddLoadICSlot();
+ } else {
+ property_feedback_slot_ = spec->AddKeyedLoadICSlot();
+ }
}
- FeedbackVectorSlot PropertyFeedbackSlot() const {
- return property_feedback_slot_;
- }
+ FeedbackSlot PropertyFeedbackSlot() const { return property_feedback_slot_; }
  // Returns the property's assignment type.
static LhsKind GetAssignType(Property* property) {
@@ -1832,7 +1818,7 @@ class Property final : public Expression {
class InlineCacheStateField
: public BitField<InlineCacheState, KeyTypeField::kNext, 4> {};
- FeedbackVectorSlot property_feedback_slot_;
+ FeedbackSlot property_feedback_slot_;
Expression* obj_;
Expression* key_;
SmallMapList receiver_types_;
@@ -1847,10 +1833,10 @@ class Call final : public Expression {
void set_expression(Expression* e) { expression_ = e; }
// Type feedback information.
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache);
- FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
+ FeedbackSlot CallFeedbackICSlot() const { return ic_slot_; }
SmallMapList* GetReceiverTypes() {
if (expression()->IsProperty()) {
@@ -1900,6 +1886,10 @@ class Call final : public Expression {
}
void MarkTail() { bit_field_ = IsTailField::update(bit_field_, true); }
+ bool only_last_arg_is_spread() {
+ return !arguments_->is_empty() && arguments_->last()->IsSpread();
+ }
+
enum CallType {
GLOBAL_CALL,
WITH_CALL,
@@ -1949,7 +1939,7 @@ class Call final : public Expression {
class IsTailField : public BitField<bool, IsUninitializedField::kNext, 1> {};
class IsPossiblyEvalField : public BitField<bool, IsTailField::kNext, 1> {};
- FeedbackVectorSlot ic_slot_;
+ FeedbackSlot ic_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
@@ -1965,14 +1955,14 @@ class CallNew final : public Expression {
void set_expression(Expression* e) { expression_ = e; }
// Type feedback information.
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
// CallNew stores feedback in the exact same way as Call. We can
// piggyback on the type feedback infrastructure for calls.
callnew_feedback_slot_ = spec->AddCallICSlot();
}
- FeedbackVectorSlot CallNewFeedbackSlot() {
+ FeedbackSlot CallNewFeedbackSlot() {
DCHECK(!callnew_feedback_slot_.IsInvalid());
return callnew_feedback_slot_;
}
@@ -1999,6 +1989,10 @@ class CallNew final : public Expression {
set_is_monomorphic(true);
}
+ bool only_last_arg_is_spread() {
+ return !arguments_->is_empty() && arguments_->last()->IsSpread();
+ }
+
private:
friend class AstNodeFactory;
@@ -2012,7 +2006,7 @@ class CallNew final : public Expression {
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- FeedbackVectorSlot callnew_feedback_slot_;
+ FeedbackSlot callnew_feedback_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
@@ -2047,10 +2041,7 @@ class CallRuntime final : public Expression {
static int num_ids() { return parent_num_ids() + 1; }
BailoutId CallId() { return BailoutId(local_id(0)); }
-
- const char* debug_name() {
- return is_jsruntime() ? "(context function)" : function_->name;
- }
+ const char* debug_name();
private:
friend class AstNodeFactory;
@@ -2139,12 +2130,10 @@ class BinaryOperation final : public Expression {
// BinaryOperation will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used
  // by full codegen and the feedback vector slot is used by the interpreter.
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache);
- FeedbackVectorSlot BinaryOperationFeedbackSlot() const {
- return type_feedback_slot_;
- }
+ FeedbackSlot BinaryOperationFeedbackSlot() const { return feedback_slot_; }
TypeFeedbackId BinaryOperationFeedbackId() const {
return TypeFeedbackId(local_id(1));
@@ -2182,7 +2171,7 @@ class BinaryOperation final : public Expression {
Expression* left_;
Expression* right_;
Handle<AllocationSite> allocation_site_;
- FeedbackVectorSlot type_feedback_slot_;
+ FeedbackSlot feedback_slot_;
class OperatorField
: public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
@@ -2228,13 +2217,13 @@ class CountOperation final : public Expression {
}
// Feedback slot for binary operation is only used by ignition.
- FeedbackVectorSlot CountBinaryOpFeedbackSlot() const {
+ FeedbackSlot CountBinaryOpFeedbackSlot() const {
return binary_operation_slot_;
}
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
- FeedbackVectorSlot CountSlot() const { return slot_; }
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache);
+ FeedbackSlot CountSlot() const { return slot_; }
private:
friend class AstNodeFactory;
@@ -2256,8 +2245,8 @@ class CountOperation final : public Expression {
: public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
- FeedbackVectorSlot slot_;
- FeedbackVectorSlot binary_operation_slot_;
+ FeedbackSlot slot_;
+ FeedbackSlot binary_operation_slot_;
AstType* type_;
Expression* expression_;
SmallMapList receiver_types_;
@@ -2284,12 +2273,10 @@ class CompareOperation final : public Expression {
// CompareOperation will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used
  // by full codegen and the feedback vector slot is used by the interpreter.
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache);
- FeedbackVectorSlot CompareOperationFeedbackSlot() const {
- return type_feedback_slot_;
- }
+ FeedbackSlot CompareOperationFeedbackSlot() const { return feedback_slot_; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -2316,7 +2303,7 @@ class CompareOperation final : public Expression {
Expression* right_;
AstType* combined_type_;
- FeedbackVectorSlot type_feedback_slot_;
+ FeedbackSlot feedback_slot_;
class OperatorField
: public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
};
@@ -2430,9 +2417,9 @@ class Assignment final : public Expression {
bit_field_ = StoreModeField::update(bit_field_, mode);
}
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
- FeedbackVectorSlot AssignmentSlot() const { return slot_; }
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache);
+ FeedbackSlot AssignmentSlot() const { return slot_; }
private:
friend class AstNodeFactory;
@@ -2450,7 +2437,7 @@ class Assignment final : public Expression {
: public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
- FeedbackVectorSlot slot_;
+ FeedbackSlot slot_;
Expression* target_;
Expression* value_;
BinaryOperation* binary_operation_;
@@ -2597,21 +2584,15 @@ class FunctionLiteral final : public Expression {
}
LanguageMode language_mode() const;
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
- // The + 1 is because we need an array with room for the literals
- // as well as the feedback vector.
- literal_feedback_slot_ =
- spec->AddCreateClosureSlot(materialized_literal_count_ + 1);
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ literal_feedback_slot_ = spec->AddCreateClosureSlot();
}
- FeedbackVectorSlot LiteralFeedbackSlot() const {
- return literal_feedback_slot_;
- }
+ FeedbackSlot LiteralFeedbackSlot() const { return literal_feedback_slot_; }
static bool NeedsHomeObject(Expression* expr);
- int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
int parameter_count() { return parameter_count_; }
int function_length() { return function_length_; }
@@ -2718,14 +2699,13 @@ class FunctionLiteral final : public Expression {
FunctionLiteral(Zone* zone, const AstString* name,
AstValueFactory* ast_value_factory, DeclarationScope* scope,
- ZoneList<Statement*>* body, int materialized_literal_count,
- int expected_property_count, int parameter_count,
- int function_length, FunctionType function_type,
+ ZoneList<Statement*>* body, int expected_property_count,
+ int parameter_count, int function_length,
+ FunctionType function_type,
ParameterFlag has_duplicate_parameters,
EagerCompileHint eager_compile_hint, int position,
bool has_braces, int function_literal_id)
: Expression(position, kFunctionLiteral),
- materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
parameter_count_(parameter_count),
function_length_(function_length),
@@ -2757,7 +2737,6 @@ class FunctionLiteral final : public Expression {
: public BitField<BailoutReason, ShouldNotBeUsedOnceHintField::kNext, 8> {
};
- int materialized_literal_count_;
int expected_property_count_;
int parameter_count_;
int function_length_;
@@ -2772,7 +2751,7 @@ class FunctionLiteral final : public Expression {
Handle<String> inferred_name_;
AstProperties ast_properties_;
int function_literal_id_;
- FeedbackVectorSlot literal_feedback_slot_;
+ FeedbackSlot literal_feedback_slot_;
};
// Property is used for passing information
@@ -2816,16 +2795,16 @@ class ClassLiteral final : public Expression {
  // Class literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache);
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache);
bool NeedsProxySlot() const {
return class_variable_proxy() != nullptr &&
class_variable_proxy()->var()->IsUnallocated();
}
- FeedbackVectorSlot HomeObjectSlot() const { return home_object_slot_; }
- FeedbackVectorSlot ProxySlot() const { return proxy_slot_; }
+ FeedbackSlot HomeObjectSlot() const { return home_object_slot_; }
+ FeedbackSlot ProxySlot() const { return proxy_slot_; }
private:
friend class AstNodeFactory;
@@ -2845,8 +2824,8 @@ class ClassLiteral final : public Expression {
}
int end_position_;
- FeedbackVectorSlot home_object_slot_;
- FeedbackVectorSlot proxy_slot_;
+ FeedbackSlot home_object_slot_;
+ FeedbackSlot proxy_slot_;
VariableProxy* class_variable_proxy_;
Expression* extends_;
FunctionLiteral* constructor_;
@@ -2863,18 +2842,13 @@ class NativeFunctionLiteral final : public Expression {
public:
Handle<String> name() const { return name_->string(); }
v8::Extension* extension() const { return extension_; }
- FeedbackVectorSlot LiteralFeedbackSlot() const {
- return literal_feedback_slot_;
- }
+ FeedbackSlot LiteralFeedbackSlot() const { return literal_feedback_slot_; }
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
- // 0 is a magic number here. It means we are holding the literals
- // array for a native function literal, which needs to be
- // the empty literals array.
- // TODO(mvstanton): The FeedbackVectorSlotCache can be adapted
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ // TODO(mvstanton): The FeedbackSlotCache can be adapted
// to always return the same slot for this case.
- literal_feedback_slot_ = spec->AddCreateClosureSlot(0);
+ literal_feedback_slot_ = spec->AddCreateClosureSlot();
}
private:
@@ -2888,7 +2862,7 @@ class NativeFunctionLiteral final : public Expression {
const AstRawString* name_;
v8::Extension* extension_;
- FeedbackVectorSlot literal_feedback_slot_;
+ FeedbackSlot literal_feedback_slot_;
};
@@ -2965,38 +2939,54 @@ class EmptyParentheses final : public Expression {
// (defined at https://tc39.github.io/ecma262/#sec-getiterator). Ignition
// desugars this into a LoadIC / JSLoadNamed, CallIC, and a type-check to
// validate return value of the Symbol.iterator() call.
+enum class IteratorType { kNormal, kAsync };
class GetIterator final : public Expression {
public:
+ IteratorType hint() const { return hint_; }
+
Expression* iterable() const { return iterable_; }
void set_iterable(Expression* iterable) { iterable_ = iterable; }
static int num_ids() { return parent_num_ids(); }
- void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
- iterator_property_feedback_slot_ =
- spec->AddSlot(FeedbackVectorSlotKind::LOAD_IC);
- iterator_call_feedback_slot_ =
- spec->AddSlot(FeedbackVectorSlotKind::CALL_IC);
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ iterator_property_feedback_slot_ = spec->AddLoadICSlot();
+ iterator_call_feedback_slot_ = spec->AddCallICSlot();
+ if (hint() == IteratorType::kAsync) {
+ async_iterator_property_feedback_slot_ = spec->AddLoadICSlot();
+ async_iterator_call_feedback_slot_ = spec->AddCallICSlot();
+ }
}
- FeedbackVectorSlot IteratorPropertyFeedbackSlot() const {
+ FeedbackSlot IteratorPropertyFeedbackSlot() const {
return iterator_property_feedback_slot_;
}
- FeedbackVectorSlot IteratorCallFeedbackSlot() const {
+ FeedbackSlot IteratorCallFeedbackSlot() const {
return iterator_call_feedback_slot_;
}
+ FeedbackSlot AsyncIteratorPropertyFeedbackSlot() const {
+ return async_iterator_property_feedback_slot_;
+ }
+
+ FeedbackSlot AsyncIteratorCallFeedbackSlot() const {
+ return async_iterator_call_feedback_slot_;
+ }
+
private:
friend class AstNodeFactory;
- explicit GetIterator(Expression* iterable, int pos)
- : Expression(pos, kGetIterator), iterable_(iterable) {}
+ explicit GetIterator(Expression* iterable, IteratorType hint, int pos)
+ : Expression(pos, kGetIterator), hint_(hint), iterable_(iterable) {}
+ IteratorType hint_;
Expression* iterable_;
- FeedbackVectorSlot iterator_property_feedback_slot_;
- FeedbackVectorSlot iterator_call_feedback_slot_;
+ FeedbackSlot iterator_property_feedback_slot_;
+ FeedbackSlot iterator_call_feedback_slot_;
+ FeedbackSlot async_iterator_property_feedback_slot_;
+ FeedbackSlot async_iterator_call_feedback_slot_;
};
// ----------------------------------------------------------------------------
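
The kAsync branch above allocates a second load/call slot pair because an async for-of first looks up Symbol.asyncIterator and may still fall back to Symbol.iterator. A sketch that just counts the slots; FeedbackVectorSpec here is a counting stand-in, not V8's class:

    #include <cstdio>

    enum class IteratorType { kNormal, kAsync };

    struct FeedbackVectorSpec {
      int next = 0;
      int AddLoadICSlot() { return next++; }
      int AddCallICSlot() { return next++; }
    };

    int SlotsForGetIterator(IteratorType hint) {
      FeedbackVectorSpec spec;
      spec.AddLoadICSlot();  // load iterable[Symbol.iterator]
      spec.AddCallICSlot();  // call it
      if (hint == IteratorType::kAsync) {
        spec.AddLoadICSlot();  // load iterable[Symbol.asyncIterator]
        spec.AddCallICSlot();  // call it
      }
      return spec.next;
    }

    int main() {
      std::printf("sync: %d slots, async: %d slots\n",
                  SlotsForGetIterator(IteratorType::kNormal),
                  SlotsForGetIterator(IteratorType::kAsync));  // 2 and 4
    }
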
@@ -3212,6 +3202,11 @@ class AstNodeFactory final BASE_EMBEDDED {
return NULL;
}
+ ForOfStatement* NewForOfStatement(ZoneList<const AstRawString*>* labels,
+ int pos) {
+ return new (zone_) ForOfStatement(labels, pos);
+ }
+
ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
return new (zone_) ExpressionStatement(expression, pos);
}
@@ -3312,8 +3307,8 @@ class AstNodeFactory final BASE_EMBEDDED {
}
// A JavaScript symbol (ECMA-262 edition 6).
- Literal* NewSymbolLiteral(const char* name, int pos) {
- return new (zone_) Literal(ast_value_factory_->NewSymbol(name), pos);
+ Literal* NewSymbolLiteral(AstSymbol symbol, int pos) {
+ return new (zone_) Literal(ast_value_factory_->NewSymbol(symbol), pos);
}
Literal* NewNumberLiteral(double number, int pos, bool with_dot = false) {
@@ -3342,10 +3337,10 @@ class AstNodeFactory final BASE_EMBEDDED {
}
ObjectLiteral* NewObjectLiteral(
- ZoneList<ObjectLiteral::Property*>* properties, int literal_index,
- uint32_t boilerplate_properties, int pos) {
- return new (zone_)
- ObjectLiteral(properties, literal_index, boilerplate_properties, pos);
+ ZoneList<ObjectLiteral::Property*>* properties,
+ uint32_t boilerplate_properties, int pos, bool has_rest_property) {
+ return new (zone_) ObjectLiteral(properties, boilerplate_properties, pos,
+ has_rest_property);
}
ObjectLiteral::Property* NewObjectLiteralProperty(
@@ -3363,21 +3358,18 @@ class AstNodeFactory final BASE_EMBEDDED {
}
RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
- int literal_index, int pos) {
- return new (zone_) RegExpLiteral(pattern, flags, literal_index, pos);
+ int pos) {
+ return new (zone_) RegExpLiteral(pattern, flags, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
- int literal_index,
int pos) {
- return new (zone_) ArrayLiteral(values, -1, literal_index, pos);
+ return new (zone_) ArrayLiteral(values, -1, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
- int first_spread_index, int literal_index,
- int pos) {
- return new (zone_)
- ArrayLiteral(values, first_spread_index, literal_index, pos);
+ int first_spread_index, int pos) {
+ return new (zone_) ArrayLiteral(values, first_spread_index, pos);
}
VariableProxy* NewVariableProxy(Variable* var,
@@ -3501,30 +3493,30 @@ class AstNodeFactory final BASE_EMBEDDED {
FunctionLiteral* NewFunctionLiteral(
const AstRawString* name, DeclarationScope* scope,
- ZoneList<Statement*>* body, int materialized_literal_count,
- int expected_property_count, int parameter_count, int function_length,
+ ZoneList<Statement*>* body, int expected_property_count,
+ int parameter_count, int function_length,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
bool has_braces, int function_literal_id) {
return new (zone_) FunctionLiteral(
- zone_, name, ast_value_factory_, scope, body,
- materialized_literal_count, expected_property_count, parameter_count,
- function_length, function_type, has_duplicate_parameters,
- eager_compile_hint, position, has_braces, function_literal_id);
+ zone_, name, ast_value_factory_, scope, body, expected_property_count,
+ parameter_count, function_length, function_type,
+ has_duplicate_parameters, eager_compile_hint, position, has_braces,
+ function_literal_id);
}
// Creates a FunctionLiteral representing a top-level script, the
// result of an eval (top-level or otherwise), or the result of calling
// the Function constructor.
- FunctionLiteral* NewScriptOrEvalFunctionLiteral(
- DeclarationScope* scope, ZoneList<Statement*>* body,
- int materialized_literal_count, int expected_property_count,
- int parameter_count) {
+ FunctionLiteral* NewScriptOrEvalFunctionLiteral(DeclarationScope* scope,
+ ZoneList<Statement*>* body,
+ int expected_property_count,
+ int parameter_count) {
return new (zone_) FunctionLiteral(
zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
- body, materialized_literal_count, expected_property_count,
- parameter_count, parameter_count, FunctionLiteral::kAnonymousExpression,
+ body, expected_property_count, parameter_count, parameter_count,
+ FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kShouldLazyCompile, 0, true,
FunctionLiteral::kIdTypeTopLevel);
@@ -3581,8 +3573,9 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) EmptyParentheses(pos);
}
- GetIterator* NewGetIterator(Expression* iterable, int pos) {
- return new (zone_) GetIterator(iterable, pos);
+ GetIterator* NewGetIterator(Expression* iterable, IteratorType hint,
+ int pos) {
+ return new (zone_) GetIterator(iterable, hint, pos);
}
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 41ce9e03da..9d3a23535e 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -7,6 +7,7 @@
#include "src/ast/scopes.h"
#include "src/objects-inl.h"
#include "src/objects/module-info.h"
+#include "src/pending-compilation-error-handler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index 94550fb5c9..ce8aba8d70 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -6,7 +6,6 @@
#define V8_AST_MODULES_H_
#include "src/parsing/scanner.h" // Only for Scanner::Location.
-#include "src/pending-compilation-error-handler.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -16,6 +15,7 @@ namespace internal {
class AstRawString;
class ModuleInfo;
class ModuleInfoEntry;
+class PendingCompilationErrorHandler;
class ModuleDescriptor : public ZoneObject {
public:
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 463ae26c4d..725a8a7e7e 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -440,9 +440,9 @@ void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
#ifdef DEBUG
-// A helper for ast nodes that use FeedbackVectorSlots.
+// A helper for ast nodes that use FeedbackSlots.
static int FormatSlotNode(Vector<char>* buf, Expression* node,
- const char* node_name, FeedbackVectorSlot slot) {
+ const char* node_name, FeedbackSlot slot) {
int pos = SNPrintF(*buf, "%s", node_name);
if (!slot.IsInvalid()) {
pos += SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
@@ -978,7 +978,7 @@ void AstPrinter::VisitLiteral(Literal* node) {
void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
IndentedScope indent(this, "REGEXP LITERAL", node->position());
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "literal_index = %d\n", node->literal_index());
+ SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
PrintIndented(buf.start());
PrintLiteralIndented("PATTERN", node->pattern(), false);
int i = 0;
@@ -997,7 +997,7 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
IndentedScope indent(this, "OBJ LITERAL", node->position());
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "literal_index = %d\n", node->literal_index());
+ SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
PrintIndented(buf.start());
PrintObjectProperties(node->properties());
}
@@ -1043,7 +1043,7 @@ void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
IndentedScope indent(this, "ARRAY LITERAL", node->position());
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "literal_index = %d\n", node->literal_index());
+ SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
PrintIndented(buf.start());
if (node->values()->length() > 0) {
IndentedScope indent(this, "VALUES", node->position());
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 27654c20f6..225793c7bb 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -13,7 +13,9 @@
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/module-info.h"
+#include "src/objects/scope-info.h"
#include "src/parsing/parse-info.h"
+#include "src/parsing/preparsed-scope-data.h"
namespace v8 {
namespace internal {
@@ -64,8 +66,8 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
return reinterpret_cast<Variable*>(p->value);
}
-void VariableMap::DeclareName(Zone* zone, const AstRawString* name,
- VariableMode mode) {
+Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name,
+ VariableMode mode) {
Entry* p =
ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
ZoneAllocationPolicy(zone));
@@ -75,6 +77,7 @@ void VariableMap::DeclareName(Zone* zone, const AstRawString* name,
p->value =
mode == VAR ? kDummyPreParserVariable : kDummyPreParserLexicalVariable;
}
+ return reinterpret_cast<Variable*>(p->value);
}
void VariableMap::Remove(Variable* var) {
@@ -506,7 +509,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
}
}
- bool var_created = false;
+ Variable* created_variable = nullptr;
// Write in assignments to var for each block-scoped function declaration
auto delegates = static_cast<SloppyBlockFunctionMap::Delegate*>(p->value);
@@ -541,9 +544,9 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
if (!should_hoist) continue;
// Declare a var-style binding for the function in the outer scope
- if (!var_created) {
- var_created = true;
- if (factory) {
+ if (factory) {
+ DCHECK(!is_being_lazily_parsed_);
+ if (created_variable == nullptr) {
VariableProxy* proxy =
factory->NewVariableProxy(name, NORMAL_VARIABLE);
auto declaration =
@@ -552,22 +555,28 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// allow_harmony_restrictive_generators and
// sloppy_mode_block_scope_function_redefinition.
bool ok = true;
- DeclareVariable(declaration, VAR,
- Variable::DefaultInitializationFlag(VAR), false,
- nullptr, &ok);
+ created_variable = DeclareVariable(
+ declaration, VAR, Variable::DefaultInitializationFlag(VAR), false,
+ nullptr, &ok);
CHECK(ok); // Based on the preceding check, this should not fail
- } else {
- DeclareVariableName(name, VAR);
}
- }
- if (factory) {
Expression* assignment = factory->NewAssignment(
Token::ASSIGN, NewUnresolved(factory, name),
delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
Statement* statement =
factory->NewExpressionStatement(assignment, kNoSourcePosition);
delegate->set_statement(statement);
+ } else {
+ DCHECK(is_being_lazily_parsed_);
+ if (created_variable == nullptr) {
+ created_variable = DeclareVariableName(name, VAR);
+ if (created_variable != kDummyPreParserVariable &&
+ created_variable != kDummyPreParserLexicalVariable) {
+ DCHECK(FLAG_preparser_scope_analysis);
+ created_variable->set_maybe_assigned();
+ }
+ }
}
}
}
@@ -625,6 +634,7 @@ void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
#ifdef DEBUG
if (info->script_is_native() ? FLAG_print_builtin_scopes
: FLAG_print_scopes) {
+ PrintF("Global scope:\n");
scope->Print();
}
scope->CheckScopePositions();
@@ -655,7 +665,7 @@ void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
arguments_ = Declare(zone(), ast_value_factory->arguments_string(), VAR);
- } else if (IsLexicalVariableMode(arguments_->mode())) {
+ } else if (IsLexical(arguments_)) {
// Check if there's lexically declared variable named arguments to avoid
// redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
arguments_ = nullptr;
@@ -698,7 +708,8 @@ Variable* DeclarationScope::DeclareGeneratorObjectVar(
DCHECK(is_function_scope() || is_module_scope());
DCHECK_NULL(generator_object_var());
- Variable* result = EnsureRareData()->generator_object = NewTemporary(name);
+ Variable* result = EnsureRareData()->generator_object =
+ NewTemporary(name, kNotAssigned);
result->set_is_used();
return result;
}
@@ -946,6 +957,7 @@ Variable* DeclarationScope::DeclareParameter(
if (mode == TEMPORARY) {
var = NewTemporary(name);
} else {
+ DCHECK_EQ(mode, VAR);
var = Declare(zone(), name, mode);
// TODO(wingo): Avoid O(n^2) check.
*is_duplicate = IsDeclaredParameter(name);
@@ -958,6 +970,26 @@ Variable* DeclarationScope::DeclareParameter(
return var;
}
+Variable* DeclarationScope::DeclareParameterName(
+ const AstRawString* name, bool is_rest,
+ AstValueFactory* ast_value_factory) {
+ DCHECK(!already_resolved_);
+ DCHECK(is_function_scope() || is_module_scope());
+ DCHECK(!has_rest_ || is_rest);
+ DCHECK(is_being_lazily_parsed_);
+ has_rest_ = is_rest;
+ if (name == ast_value_factory->arguments_string()) {
+ has_arguments_parameter_ = true;
+ }
+ if (FLAG_preparser_scope_analysis) {
+ Variable* var = Declare(zone(), name, VAR);
+ params_.Add(var, zone());
+ return var;
+ }
+ DeclareVariableName(name, VAR);
+ return nullptr;
+}
+
Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag, VariableKind kind,
MaybeAssignedFlag maybe_assigned_flag) {
@@ -966,7 +998,8 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
// introduced during variable allocation, and TEMPORARY variables are
// allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode));
- DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
+ DCHECK_IMPLIES(GetDeclarationScope()->is_being_lazily_parsed(),
+ mode == VAR || mode == LET || mode == CONST);
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
return Declare(zone(), name, mode, kind, init_flag, maybe_assigned_flag);
}
@@ -995,15 +1028,25 @@ Variable* Scope::DeclareVariable(
const AstRawString* name = proxy->raw_name();
bool is_function_declaration = declaration->IsFunctionDeclaration();
+ // Pessimistically assume that top-level variables will be assigned.
+ //
+ // Top-level variables in a script can be accessed by other scripts or even
+ // become global properties. While this does not apply to top-level variables
+ // in a module (assuming they are not exported), we must still mark these as
+ // assigned because they might be accessed by a lazily parsed top-level
+ // function, which, for efficiency, we preparse without variable tracking.
+ if (is_script_scope() || is_module_scope()) {
+ if (mode != CONST) proxy->set_is_assigned();
+ }
+
Variable* var = nullptr;
if (is_eval_scope() && is_sloppy(language_mode()) && mode == VAR) {
// In a var binding in a sloppy direct eval, pollute the enclosing scope
// with this new binding by doing the following:
// The proxy is bound to a lookup variable to force a dynamic declaration
// using the DeclareEvalVar or DeclareEvalFunction runtime functions.
- VariableKind kind = NORMAL_VARIABLE;
- // TODO(sigurds) figure out if kNotAssigned is OK here
- var = new (zone()) Variable(this, name, mode, kind, init, kNotAssigned);
+ var = new (zone())
+ Variable(this, name, mode, NORMAL_VARIABLE, init, kMaybeAssigned);
var->AllocateTo(VariableLocation::LOOKUP, -1);
} else {
// Declare the variable in the declaration scope.
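
A compact sketch of the rule the "pessimistically assume" comment above encodes, with simplified enums standing in for V8's scope types and variable modes:

    // Sketch only: simplified stand-ins for V8's scope/mode enums.
    enum class ScopeKind { kScript, kModule, kFunction, kBlock };
    enum class Mode { kVar, kLet, kConst };

    // Top-level script variables can be written by other scripts (or become
    // global properties); top-level module variables may be touched by lazily
    // parsed top-level functions that were preparsed without variable
    // tracking. Only CONST bindings are exempt from the pessimistic marking.
    bool MustMarkAssigned(ScopeKind scope, Mode mode) {
      if (scope != ScopeKind::kScript && scope != ScopeKind::kModule)
        return false;
      return mode != Mode::kConst;
    }
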
@@ -1077,7 +1120,8 @@ Variable* Scope::DeclareVariable(
return var;
}
-void Scope::DeclareVariableName(const AstRawString* name, VariableMode mode) {
+Variable* Scope::DeclareVariableName(const AstRawString* name,
+ VariableMode mode) {
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_);
DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
@@ -1096,7 +1140,21 @@ void Scope::DeclareVariableName(const AstRawString* name, VariableMode mode) {
DCHECK(scope_info_.is_null());
// Declare the variable in the declaration scope.
- variables_.DeclareName(zone(), name, mode);
+ if (FLAG_preparser_scope_analysis) {
+ Variable* var = LookupLocal(name);
+ DCHECK_NE(var, kDummyPreParserLexicalVariable);
+ DCHECK_NE(var, kDummyPreParserVariable);
+ if (var == nullptr) {
+ var = DeclareLocal(name, mode);
+ } else if (mode == VAR) {
+ DCHECK_EQ(var->mode(), VAR);
+ var->set_maybe_assigned();
+ }
+ var->set_is_used();
+ return var;
+ } else {
+ return variables_.DeclareName(zone(), name, mode);
+ }
}
VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
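
The FLAG_preparser_scope_analysis branch above boils down to: reuse an existing binding for a repeated var declaration, marking it maybe-assigned, and otherwise create one. A standalone sketch with a plain std::unordered_map standing in for V8's VariableMap:

    #include <string>
    #include <unordered_map>

    struct Var {
      enum Mode { kVar, kLet } mode = kVar;
      bool maybe_assigned = false;
      bool is_used = false;
    };

    // Sketch of the preparser path: reuse an existing binding for a repeated
    // `var`, marking it maybe-assigned; otherwise create a fresh one.
    Var* DeclareVariableNameSketch(std::unordered_map<std::string, Var>* vars,
                                   const std::string& name, Var::Mode mode) {
      auto it = vars->find(name);
      Var* var;
      if (it == vars->end()) {
        var = &(*vars)[name];
        var->mode = mode;
      } else {
        var = &it->second;
        if (mode == Var::kVar) var->maybe_assigned = true;  // redeclaration
      }
      var->is_used = true;
      return var;
    }
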
@@ -1124,6 +1182,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
VariableKind kind) {
DCHECK(is_script_scope());
return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind);
+ // TODO(neis): Mark variable as maybe-assigned?
}
@@ -1147,10 +1206,16 @@ bool Scope::RemoveUnresolved(VariableProxy* var) {
}
Variable* Scope::NewTemporary(const AstRawString* name) {
+ return NewTemporary(name, kMaybeAssigned);
+}
+
+Variable* Scope::NewTemporary(const AstRawString* name,
+ MaybeAssignedFlag maybe_assigned) {
DeclarationScope* scope = GetClosureScope();
Variable* var = new (zone())
Variable(scope, name, TEMPORARY, NORMAL_VARIABLE, kCreatedInitialized);
scope->AddLocal(var);
+ if (maybe_assigned == kMaybeAssigned) var->set_maybe_assigned();
return var;
}
@@ -1365,7 +1430,11 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
DCHECK(is_function_scope());
// Reset all non-trivial members.
- params_.Clear();
+ if (!aborted || !IsArrowFunction(function_kind_)) {
+    // Do not remove parameters when lazy parsing of an arrow function has
+    // failed, as the formal parameters are not re-parsed.
+ params_.Clear();
+ }
decls_.Clear();
locals_.Clear();
inner_scope_ = nullptr;
@@ -1394,7 +1463,9 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
was_lazily_parsed_ = !aborted;
}
-void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
+void DeclarationScope::AnalyzePartially(
+ AstNodeFactory* ast_node_factory,
+ PreParsedScopeData* preparsed_scope_data) {
DCHECK(!force_eager_compilation_);
VariableProxy* unresolved = nullptr;
@@ -1415,7 +1486,20 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
!(MustAllocate(arguments_) && !has_arguments_parameter_)) {
arguments_ = nullptr;
}
+
+ if (FLAG_preparser_scope_analysis) {
+ // Decide context allocation for the locals and parameters and store the
+ // info away.
+ AllocateVariablesRecursively();
+ CollectVariableData(preparsed_scope_data);
+ }
+ }
+#ifdef DEBUG
+ if (FLAG_print_scopes) {
+ PrintF("Inner function scope:\n");
+ Print();
}
+#endif
ResetAfterPreparsing(ast_node_factory->ast_value_factory(), false);
@@ -1501,6 +1585,10 @@ void PrintMap(int indent, const char* label, VariableMap* map, bool locals,
for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var == function_var) continue;
+ if (var == kDummyPreParserVariable ||
+ var == kDummyPreParserLexicalVariable) {
+ continue;
+ }
bool local = !IsDynamicVariableMode(var->mode());
if ((locals ? local : !local) &&
(var->is_used() || !var->IsUnallocated())) {
@@ -1550,6 +1638,9 @@ void Scope::Print(int n) {
}
PrintF(" { // (%d, %d)\n", start_position(), end_position());
+ if (is_hidden()) {
+ Indent(n1, "// is hidden\n");
+ }
// Function name, if any (named function literals, only).
if (function != nullptr) {
@@ -1836,7 +1927,6 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
#endif
DCHECK_NOT_NULL(var);
- if (proxy->is_assigned()) var->set_maybe_assigned();
if (AccessNeedsHoleCheck(var, proxy, this)) proxy->set_needs_hole_check();
proxy->BindTo(var);
}
@@ -1875,6 +1965,11 @@ void Scope::ResolveVariablesRecursively(ParseInfo* info) {
VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
ParseInfo* info,
VariableProxy* stack) {
+ // Module variables must be allocated before variable resolution
+ // to ensure that AccessNeedsHoleCheck() can detect import variables.
+ if (info != nullptr && is_module_scope()) {
+ AsModuleScope()->AllocateModuleVariables();
+ }
// Lazy parsed declaration scopes are already partially analyzed. If there are
// unresolved references remaining, they just need to be resolved in outer
// scopes.
@@ -1901,6 +1996,9 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
} else {
var->set_is_used();
+ if (proxy->is_assigned()) {
+ var->set_maybe_assigned();
+ }
}
}
}
@@ -1916,6 +2014,9 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
}
bool Scope::MustAllocate(Variable* var) {
+ if (var == kDummyPreParserLexicalVariable || var == kDummyPreParserVariable) {
+ return true;
+ }
DCHECK(var->location() != VariableLocation::MODULE);
// Give var a read/write use if there is a chance it might be accessed
// via an eval() call. This is only possible if the variable has a
@@ -2091,7 +2192,8 @@ void ModuleScope::AllocateModuleVariables() {
void Scope::AllocateVariablesRecursively() {
DCHECK(!already_resolved_);
- DCHECK_EQ(0, num_stack_slots_);
+ DCHECK_IMPLIES(!FLAG_preparser_scope_analysis, num_stack_slots_ == 0);
+
// Don't allocate variables of preparsed scopes.
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
return;
@@ -2167,6 +2269,17 @@ void Scope::AllocateDebuggerScopeInfos(Isolate* isolate,
}
}
+void Scope::CollectVariableData(PreParsedScopeData* data) {
+ PreParsedScopeData::ScopeScope scope_scope(data, scope_type(),
+ start_position(), end_position());
+ for (Variable* local : locals_) {
+ scope_scope.MaybeAddVariable(local);
+ }
+ for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
+ inner->CollectVariableData(data);
+ }
+}
+
int Scope::StackLocalCount() const {
Variable* function =
is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;
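
CollectVariableData above walks the scope tree depth-first, recording each scope's extent and its locals into the preparsed-scope data. A standalone sketch of that traversal with simplified types; V8 links inner scopes through sibling pointers, while the sketch uses a vector for brevity:

    #include <string>
    #include <utility>
    #include <vector>

    struct VariableSketch {
      std::string name;
      bool context_allocated;
    };

    struct ScopeSketch {
      int start_position;
      int end_position;
      std::vector<VariableSketch> locals;
      std::vector<ScopeSketch*> inner_scopes;
    };

    struct CollectedDataSketch {
      // Flattened (scope extent, variable) records, in pre-order.
      std::vector<std::pair<std::pair<int, int>, VariableSketch>> records;
    };

    // Depth-first: record this scope's locals, then recurse into inner scopes.
    void CollectVariableDataSketch(const ScopeSketch* scope,
                                   CollectedDataSketch* data) {
      for (const VariableSketch& local : scope->locals) {
        data->records.push_back(
            {{scope->start_position, scope->end_position}, local});
      }
      for (const ScopeSketch* inner : scope->inner_scopes) {
        CollectVariableDataSketch(inner, data);
      }
    }
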
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 9e80e1c8e6..119d77c5c8 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -9,7 +9,6 @@
#include "src/base/hashmap.h"
#include "src/globals.h"
#include "src/objects.h"
-#include "src/objects/scope-info.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -20,6 +19,7 @@ class AstValueFactory;
class AstRawString;
class Declaration;
class ParseInfo;
+class PreParsedScopeData;
class SloppyBlockFunctionStatement;
class Statement;
class StringSet;
@@ -39,7 +39,8 @@ class VariableMap: public ZoneHashMap {
// Records that "name" exists (if not recorded yet) but doesn't create a
// Variable. Useful for preparsing.
- void DeclareName(Zone* zone, const AstRawString* name, VariableMode mode);
+ Variable* DeclareName(Zone* zone, const AstRawString* name,
+ VariableMode mode);
Variable* Lookup(const AstRawString* name);
void Remove(Variable* var);
@@ -180,7 +181,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool* sloppy_mode_block_scope_function_redefinition,
bool* ok);
- void DeclareVariableName(const AstRawString* name, VariableMode mode);
+ // The return value is meaningful only if FLAG_preparser_scope_analysis is on.
+ Variable* DeclareVariableName(const AstRawString* name, VariableMode mode);
// Declarations list.
ThreadedList<Declaration>* declarations() { return &decls_; }
@@ -409,7 +411,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Scope* GetOuterScopeWithContext();
// Analyze() must have been called once to create the ScopeInfo.
- Handle<ScopeInfo> scope_info() {
+ Handle<ScopeInfo> scope_info() const {
DCHECK(!scope_info_.is_null());
return scope_info_;
}
@@ -481,6 +483,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// should also be invoked after resolution.
bool NeedsScopeInfo() const;
+ Variable* NewTemporary(const AstRawString* name,
+ MaybeAssignedFlag maybe_assigned);
Zone* zone_;
// Scope tree.
@@ -586,6 +590,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void AllocateDebuggerScopeInfos(Isolate* isolate,
MaybeHandle<ScopeInfo> outer_scope);
+ void CollectVariableData(PreParsedScopeData* data);
+
// Construct a scope based on the scope info.
Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
@@ -605,7 +611,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
friend class ScopeTestHelper;
};
-class DeclarationScope : public Scope {
+class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
public:
DeclarationScope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
FunctionKind function_kind = kNormalFunction);
@@ -688,6 +694,11 @@ class DeclarationScope : public Scope {
bool is_optional, bool is_rest, bool* is_duplicate,
AstValueFactory* ast_value_factory);
+ // Declares that a parameter with the name exists. Creates a Variable and
+ // returns it if FLAG_preparser_scope_analysis is on.
+ Variable* DeclareParameterName(const AstRawString* name, bool is_rest,
+ AstValueFactory* ast_value_factory);
+
// Declare an implicit global variable in this scope which must be a
// script scope. The variable was introduced (possibly from an inner
// scope) by a reference to an unresolved variable with no intervening
@@ -807,7 +818,8 @@ class DeclarationScope : public Scope {
// records variables which cannot be resolved inside the Scope (we don't yet
// know what they will resolve to since the outer Scopes are incomplete) and
// migrates them into migrate_to.
- void AnalyzePartially(AstNodeFactory* ast_node_factory);
+ void AnalyzePartially(AstNodeFactory* ast_node_factory,
+ PreParsedScopeData* preparsed_scope_data);
Handle<StringSet> CollectNonLocals(ParseInfo* info,
Handle<StringSet> non_locals);
@@ -887,8 +899,6 @@ class DeclarationScope : public Scope {
Variable* arguments_;
struct RareData : public ZoneObject {
- void* operator new(size_t size, Zone* zone) { return zone->New(size); }
-
// Convenience variable; Subclass constructor only
Variable* this_function = nullptr;
diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc
index f138727177..cd1d8f77b7 100644
--- a/deps/v8/src/ast/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -37,9 +37,8 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return (IsDynamicVariableMode(mode()) ||
- (IsDeclaredVariableMode(mode()) && !IsLexicalVariableMode(mode()))) &&
- scope_ != NULL && scope_->is_script_scope();
+ return (IsDynamicVariableMode(mode()) || mode() == VAR) &&
+ scope_ != nullptr && scope_->is_script_scope();
}
} // namespace internal
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index e0af700ce5..4a5b9cb558 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -4,7 +4,7 @@
#include "src/background-parsing-task.h"
-#include "src/debug/debug.h"
+#include "src/objects-inl.h"
#include "src/parsing/parser.h"
namespace v8 {
@@ -13,7 +13,6 @@ namespace internal {
void StreamedSource::Release() {
parser.reset();
info.reset();
- zone.reset();
}
BackgroundParsingTask::BackgroundParsingTask(
@@ -29,10 +28,8 @@ BackgroundParsingTask::BackgroundParsingTask(
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
- Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
- ParseInfo* info = new ParseInfo(zone);
+ ParseInfo* info = new ParseInfo(isolate->allocator());
info->set_toplevel();
- source->zone.reset(zone);
source->info.reset(info);
info->set_isolate(isolate);
info->set_source_stream(source->source_stream.get());
diff --git a/deps/v8/src/background-parsing-task.h b/deps/v8/src/background-parsing-task.h
index d7fe6ba8db..061e36595d 100644
--- a/deps/v8/src/background-parsing-task.h
+++ b/deps/v8/src/background-parsing-task.h
@@ -38,7 +38,6 @@ struct StreamedSource {
// between parsing and compilation. These need to be initialized before the
// compilation starts.
UnicodeCache unicode_cache;
- std::unique_ptr<Zone> zone;
std::unique_ptr<ParseInfo> info;
std::unique_ptr<Parser> parser;
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index fe3f6a4b70..b5e39c69f7 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -186,6 +186,7 @@ namespace internal {
"Sloppy function expects JSReceiver as receiver.") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
+ V(kSpreadCall, "Call with spread argument") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSuperReference, "Super reference") \
@@ -212,14 +213,10 @@ namespace internal {
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
"Unexpected fallthrough from CharCodeAt slow case") \
- V(kUnexpectedFallthroughFromCharFromCodeSlowCase, \
- "Unexpected fallthrough from CharFromCode slow case") \
V(kUnexpectedFallThroughFromStringComparison, \
"Unexpected fall-through from string comparison") \
V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
"Unexpected fallthrough to CharCodeAt slow case") \
- V(kUnexpectedFallthroughToCharFromCodeSlowCase, \
- "Unexpected fallthrough to CharFromCode slow case") \
V(kUnexpectedFPUStackDepthAfterInstruction, \
"Unexpected FPU stack depth after instruction") \
V(kUnexpectedInitialMapForArrayFunction1, \
@@ -252,6 +249,8 @@ namespace internal {
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const or let variable") \
+ V(kUnexpectedReturnFromFrameDropper, \
+ "Unexpectedly returned from dropping frames") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
V(kUnsupportedSwitchStatement, "Unsupported switch statement") \
V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index f40853c587..f4f43fc817 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -31,7 +31,9 @@ class AtomicNumber {
&value_, -static_cast<base::AtomicWord>(decrement)));
}
- V8_INLINE T Value() { return static_cast<T>(base::Acquire_Load(&value_)); }
+ V8_INLINE T Value() const {
+ return static_cast<T>(base::Acquire_Load(&value_));
+ }
V8_INLINE void SetValue(T new_value) {
base::Release_Store(&value_, static_cast<base::AtomicWord>(new_value));
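
The acquire-load in Value() pairs with the release-store in SetValue(): a reader that observes the new value also observes everything the writer did before publishing it. A minimal sketch of the same pairing using std::atomic from the C++ standard library rather than V8's base atomics:

    #include <atomic>
    #include <cassert>

    template <typename T>
    class AtomicNumberSketch {
     public:
      // Acquire-load: observes everything published before the matching
      // release-store.
      T Value() const { return value_.load(std::memory_order_acquire); }

      // Release-store: publishes prior writes to any thread that
      // acquire-loads the new value.
      void SetValue(T new_value) {
        value_.store(new_value, std::memory_order_release);
      }

     private:
      std::atomic<T> value_{0};
    };

    int main() {
      AtomicNumberSketch<int> n;
      n.SetValue(42);
      assert(n.Value() == 42);
    }
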
diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h
index e643b2f2ac..4436a2d949 100644
--- a/deps/v8/src/base/hashmap.h
+++ b/deps/v8/src/base/hashmap.h
@@ -40,6 +40,11 @@ class TemplateHashMapImpl {
MatchFun match = MatchFun(),
AllocationPolicy allocator = AllocationPolicy());
+  // Clones the given hashmap, creating a copy with the same entries.
+ TemplateHashMapImpl(const TemplateHashMapImpl<Key, Value, MatchFun,
+ AllocationPolicy>* original,
+ AllocationPolicy allocator = AllocationPolicy());
+
~TemplateHashMapImpl();
// If an entry with matching key is found, returns that entry.
@@ -119,6 +124,8 @@ class TemplateHashMapImpl {
uint32_t hash,
AllocationPolicy allocator = AllocationPolicy());
void Resize(AllocationPolicy allocator);
+
+ DISALLOW_COPY_AND_ASSIGN(TemplateHashMapImpl);
};
template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
@@ -131,6 +138,19 @@ TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::
template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::
+ TemplateHashMapImpl(const TemplateHashMapImpl<Key, Value, MatchFun,
+ AllocationPolicy>* original,
+ AllocationPolicy allocator)
+ : capacity_(original->capacity_),
+ occupancy_(original->occupancy_),
+ match_(original->match_) {
+ map_ = reinterpret_cast<Entry*>(allocator.New(capacity_ * sizeof(Entry)));
+ memcpy(map_, original->map_, capacity_ * sizeof(Entry));
+}
+
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
TemplateHashMapImpl<Key, Value, MatchFun,
AllocationPolicy>::~TemplateHashMapImpl() {
AllocationPolicy::Delete(map_);
@@ -382,6 +402,14 @@ class CustomMatcherTemplateHashMapImpl
AllocationPolicy allocator = AllocationPolicy())
: Base(capacity, HashEqualityThenKeyMatcher<void*, MatchFun>(match),
allocator) {}
+
+ CustomMatcherTemplateHashMapImpl(
+ const CustomMatcherTemplateHashMapImpl<AllocationPolicy>* original,
+ AllocationPolicy allocator = AllocationPolicy())
+ : Base(original, allocator) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CustomMatcherTemplateHashMapImpl);
};
typedef CustomMatcherTemplateHashMapImpl<DefaultAllocationPolicy>
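
The clone constructor added above bitwise-copies the flat entry array, which is valid only while entries are trivially copyable; note the accompanying DISALLOW_COPY_AND_ASSIGN, which forces cloning through this explicit path. A standalone sketch of the technique with simplified, non-V8 types:

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    struct Entry {
      void* key;
      void* value;
      uint32_t hash;
    };

    class FlatHashMapSketch {
     public:
      explicit FlatHashMapSketch(uint32_t capacity)
          : capacity_(capacity),
            occupancy_(0),
            map_(static_cast<Entry*>(std::calloc(capacity, sizeof(Entry)))) {}

      // Clone constructor: copy the metadata, then bitwise-copy the entry
      // array. Valid only because Entry is trivially copyable.
      explicit FlatHashMapSketch(const FlatHashMapSketch* original)
          : capacity_(original->capacity_), occupancy_(original->occupancy_) {
        map_ = static_cast<Entry*>(std::malloc(capacity_ * sizeof(Entry)));
        std::memcpy(map_, original->map_, capacity_ * sizeof(Entry));
      }

      ~FlatHashMapSketch() { std::free(map_); }

     private:
      uint32_t capacity_;
      uint32_t occupancy_;
      Entry* map_;
    };
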
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index cb2ff8f92f..e852dde8dd 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -43,13 +43,13 @@ namespace base {
//
// We make sure CHECK et al. always evaluates their arguments, as
// doing CHECK(FunctionWithSideEffect()) is a common idiom.
-#define CHECK(condition) \
- do { \
- if (V8_UNLIKELY(!(condition))) { \
- V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", #condition); \
- } \
+#define CHECK_WITH_MSG(condition, message) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", message); \
+ } \
} while (0)
-
+#define CHECK(condition) CHECK_WITH_MSG(condition, #condition)
#ifdef DEBUG
@@ -70,7 +70,12 @@ namespace base {
// Make all CHECK functions discard their log strings to reduce code
// bloat for official release builds.
-#define CHECK_OP(name, op, lhs, rhs) CHECK((lhs)op(rhs))
+#define CHECK_OP(name, op, lhs, rhs) \
+ do { \
+ bool _cmp = \
+ ::v8::base::Cmp##name##Impl<decltype(lhs), decltype(rhs)>(lhs, rhs); \
+ CHECK_WITH_MSG(_cmp, #lhs " " #op " " #rhs); \
+ } while (0)
#endif
@@ -199,7 +204,8 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#define CHECK_GT(lhs, rhs) CHECK_OP(GT, >, lhs, rhs)
#define CHECK_NULL(val) CHECK((val) == nullptr)
#define CHECK_NOT_NULL(val) CHECK((val) != nullptr)
-#define CHECK_IMPLIES(lhs, rhs) CHECK(!(lhs) || (rhs))
+#define CHECK_IMPLIES(lhs, rhs) \
+ CHECK_WITH_MSG(!(lhs) || (rhs), #lhs " implies " #rhs)
} // namespace base
} // namespace v8
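
CHECK_WITH_MSG exists so that CHECK_OP and CHECK_IMPLIES can report the operand text (e.g. "lhs < rhs") instead of a pre-stringified boolean, even in release builds. A minimal standalone sketch of the macro pattern, with fprintf/abort standing in for V8_Fatal:

    #include <cstdio>
    #include <cstdlib>

    #define CHECK_WITH_MSG_SKETCH(condition, message)            \
      do {                                                       \
        if (!(condition)) {                                      \
          std::fprintf(stderr, "%s:%d: Check failed: %s.\n",     \
                       __FILE__, __LINE__, message);             \
          std::abort();                                          \
        }                                                        \
      } while (0)

    #define CHECK_SKETCH(condition) CHECK_WITH_MSG_SKETCH(condition, #condition)

    // Comparison checks keep the operand text in the failure message, e.g.
    // "Check failed: 2 < n." rather than "Check failed: (2) < (n)." collapsed
    // to an opaque boolean.
    #define CHECK_LT_SKETCH(lhs, rhs) \
      CHECK_WITH_MSG_SKETCH((lhs) < (rhs), #lhs " < " #rhs)

    int main() {
      CHECK_SKETCH(1 + 1 == 2);
      CHECK_LT_SKETCH(1, 2);
    }
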
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index fd47931b6b..f195649e5e 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -620,12 +620,15 @@ void Thread::Start() {
result = pthread_attr_init(&attr);
DCHECK_EQ(0, result);
size_t stack_size = stack_size_;
-#if V8_OS_AIX
if (stack_size == 0) {
- // Default on AIX is 96KB -- bump up to 2MB
+#if V8_OS_MACOSX
+ // Default on Mac OS X is 512kB -- bump up to 1MB
+ stack_size = 1 * 1024 * 1024;
+#elif V8_OS_AIX
+ // Default on AIX is 96kB -- bump up to 2MB
stack_size = 2 * 1024 * 1024;
- }
#endif
+ }
if (stack_size > 0) {
result = pthread_attr_setstacksize(&attr, stack_size);
DCHECK_EQ(0, result);
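
After the reorder, the stack-size bump applies whenever the caller did not request an explicit size, with a per-OS default. A standalone POSIX sketch of that pattern; the 1 MB and 2 MB values mirror the hunk:

    #include <pthread.h>
    #include <cstddef>

    // Pick a default stack size when the caller did not request one:
    // 1 MB on macOS (system default is 512 kB), 2 MB on AIX (default 96 kB).
    size_t EffectiveStackSize(size_t requested) {
      if (requested != 0) return requested;
    #if defined(__APPLE__)
      return 1 * 1024 * 1024;
    #elif defined(_AIX)
      return 2 * 1024 * 1024;
    #else
      return 0;  // 0 means: keep the platform default.
    #endif
    }

    int StartWithStack(pthread_t* thread, void* (*entry)(void*), void* arg,
                       size_t requested) {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      size_t stack_size = EffectiveStackSize(requested);
      if (stack_size > 0) pthread_attr_setstacksize(&attr, stack_size);
      int result = pthread_create(thread, &attr, entry, arg);
      pthread_attr_destroy(&attr);
      return result;
    }
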
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index db4712cc7f..8f9f92a08c 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -9,12 +9,14 @@
#include "src/base/ieee754.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
+#include "src/debug/debug.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
#include "src/extensions/gc-extension.h"
#include "src/extensions/ignition-statistics-extension.h"
#include "src/extensions/statistics-extension.h"
#include "src/extensions/trigger-failure-extension.h"
+#include "src/ffi/ffi-compiler.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/snapshot/natives.h"
@@ -173,6 +175,7 @@ class Genesis BASE_EMBEDDED {
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
void CreateIteratorMaps(Handle<JSFunction> empty);
+ void CreateAsyncIteratorMaps();
void CreateAsyncFunctionMaps(Handle<JSFunction> empty);
void CreateJSProxyMaps();
@@ -216,6 +219,8 @@ class Genesis BASE_EMBEDDED {
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
#undef DECLARE_FEATURE_INITIALIZATION
+ void InitializeGlobal_enable_fast_array_builtins();
+
Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
const char* name, Builtins::Name call,
BuiltinFunctionId id);
@@ -362,7 +367,6 @@ void InstallFunction(Handle<JSObject> target, Handle<Name> property_name,
if (target->IsJSGlobalObject()) {
function->shared()->set_instance_class_name(*function_name);
}
- function->shared()->set_native(true);
}
void InstallFunction(Handle<JSObject> target, Handle<JSFunction> function,
@@ -380,11 +384,14 @@ Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
Factory* factory = isolate->factory();
Handle<Code> call_code(isolate->builtins()->builtin(call));
Handle<JSObject> prototype;
- return maybe_prototype.ToHandle(&prototype)
- ? factory->NewFunction(name, call_code, prototype, type,
- instance_size, strict_function_map)
- : factory->NewFunctionWithoutPrototype(name, call_code,
- strict_function_map);
+ Handle<JSFunction> result =
+ maybe_prototype.ToHandle(&prototype)
+ ? factory->NewFunction(name, call_code, prototype, type,
+ instance_size, strict_function_map)
+ : factory->NewFunctionWithoutPrototype(name, call_code,
+ strict_function_map);
+ result->shared()->set_native(true);
+ return result;
}
Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
@@ -468,14 +475,12 @@ void SimpleInstallGetterSetter(Handle<JSObject> base, Handle<String> name,
.ToHandleChecked();
Handle<JSFunction> getter =
SimpleCreateFunction(isolate, getter_name, call_getter, 0, true);
- getter->shared()->set_native(true);
Handle<String> setter_name =
Name::ToFunctionName(name, isolate->factory()->set_string())
.ToHandleChecked();
Handle<JSFunction> setter =
SimpleCreateFunction(isolate, setter_name, call_setter, 1, true);
- setter->shared()->set_native(true);
JSObject::DefineAccessor(base, name, getter, setter, attribs).Check();
}
@@ -491,7 +496,6 @@ Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
.ToHandleChecked();
Handle<JSFunction> getter =
SimpleCreateFunction(isolate, getter_name, call, 0, adapt);
- getter->shared()->set_native(true);
Handle<Object> setter = isolate->factory()->undefined_value();
@@ -721,7 +725,6 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
Handle<JSFunction> iterator_prototype_iterator = SimpleCreateFunction(
isolate(), factory()->NewStringFromAsciiChecked("[Symbol.iterator]"),
Builtins::kReturnReceiver, 0, true);
- iterator_prototype_iterator->shared()->set_native(true);
JSObject::AddProperty(iterator_prototype, factory()->iterator_symbol(),
iterator_prototype_iterator, DONT_ENUM);
@@ -760,10 +763,12 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
SimpleInstallFunction(generator_object_prototype, "throw",
Builtins::kGeneratorPrototypeThrow, 1, true);
- // Internal version of generator_prototype_next, flagged as non-native.
+ // Internal version of generator_prototype_next, flagged as non-native such
+ // that it doesn't show up in Error traces.
Handle<JSFunction> generator_next_internal =
SimpleCreateFunction(isolate(), factory()->next_string(),
Builtins::kGeneratorPrototypeNext, 1, true);
+ generator_next_internal->shared()->set_native(false);
native_context()->set_generator_next_internal(*generator_next_internal);
// Create maps for generator functions and their prototypes. Store those
@@ -785,6 +790,50 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
*generator_object_prototype_map);
}
+void Genesis::CreateAsyncIteratorMaps() {
+ // %AsyncIteratorPrototype%
+ // proposal-async-iteration/#sec-asynciteratorprototype
+ Handle<JSObject> async_iterator_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+
+ Handle<JSFunction> async_iterator_prototype_iterator = SimpleCreateFunction(
+ isolate(), factory()->NewStringFromAsciiChecked("[Symbol.asyncIterator]"),
+ Builtins::kReturnReceiver, 0, true);
+
+ JSObject::AddProperty(async_iterator_prototype,
+ factory()->async_iterator_symbol(),
+ async_iterator_prototype_iterator, DONT_ENUM);
+
+ // %AsyncFromSyncIteratorPrototype%
+ // proposal-async-iteration/#sec-%asyncfromsynciteratorprototype%-object
+ Handle<JSObject> async_from_sync_iterator_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ SimpleInstallFunction(async_from_sync_iterator_prototype,
+ factory()->next_string(),
+ Builtins::kAsyncFromSyncIteratorPrototypeNext, 1, true);
+ SimpleInstallFunction(
+ async_from_sync_iterator_prototype, factory()->return_string(),
+ Builtins::kAsyncFromSyncIteratorPrototypeReturn, 1, true);
+ SimpleInstallFunction(
+ async_from_sync_iterator_prototype, factory()->throw_string(),
+ Builtins::kAsyncFromSyncIteratorPrototypeThrow, 1, true);
+
+ JSObject::AddProperty(
+ async_from_sync_iterator_prototype, factory()->to_string_tag_symbol(),
+ factory()->NewStringFromAsciiChecked("Async-from-Sync Iterator"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ JSObject::ForceSetPrototype(async_from_sync_iterator_prototype,
+ async_iterator_prototype);
+
+ Handle<Map> async_from_sync_iterator_map = factory()->NewMap(
+ JS_ASYNC_FROM_SYNC_ITERATOR_TYPE, JSAsyncFromSyncIterator::kSize);
+ Map::SetPrototype(async_from_sync_iterator_map,
+ async_from_sync_iterator_prototype);
+ native_context()->set_async_from_sync_iterator_map(
+ *async_from_sync_iterator_map);
+}
+
void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
// %AsyncFunctionPrototype% intrinsic
Handle<JSObject> async_function_prototype =
@@ -1295,6 +1344,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
class_function_map_->SetConstructor(*function_fun);
}
+ {
+ // --- A s y n c F r o m S y n c I t e r a t o r
+ Handle<Code> code = isolate->builtins()->AsyncIteratorValueUnwrap();
+ Handle<SharedFunctionInfo> info =
+ factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(1);
+ info->set_length(1);
+ native_context()->set_async_iterator_value_unwrap_shared_fun(*info);
+ }
+
{ // --- A r r a y ---
Handle<JSFunction> array_function =
InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
@@ -1371,6 +1430,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate, factory->ArrayIterator_string(),
JS_FAST_ARRAY_VALUE_ITERATOR_TYPE, JSArrayIterator::kSize,
array_iterator_prototype, Builtins::kIllegal);
+ array_iterator_function->shared()->set_native(false);
array_iterator_function->shared()->set_instance_class_name(
isolate->heap()->ArrayIterator_string());
@@ -1585,6 +1645,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeLocaleCompare, 1, true);
SimpleInstallFunction(prototype, "normalize",
Builtins::kStringPrototypeNormalize, 0, false);
+ SimpleInstallFunction(prototype, "replace",
+ Builtins::kStringPrototypeReplace, 2, true);
+ SimpleInstallFunction(prototype, "split", Builtins::kStringPrototypeSplit,
+ 2, true);
SimpleInstallFunction(prototype, "substr", Builtins::kStringPrototypeSubstr,
2, true);
SimpleInstallFunction(prototype, "substring",
@@ -1599,13 +1663,22 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeTrimLeft, 0, false);
SimpleInstallFunction(prototype, "trimRight",
Builtins::kStringPrototypeTrimRight, 0, false);
+ SimpleInstallFunction(prototype, "toLocaleLowerCase",
+ Builtins::kStringPrototypeToLocaleLowerCase, 0,
+ false);
+ SimpleInstallFunction(prototype, "toLocaleUpperCase",
+ Builtins::kStringPrototypeToLocaleUpperCase, 0,
+ false);
+ SimpleInstallFunction(prototype, "toLowerCase",
+ Builtins::kStringPrototypeToLowerCase, 0, false);
+ SimpleInstallFunction(prototype, "toUpperCase",
+ Builtins::kStringPrototypeToUpperCase, 0, false);
SimpleInstallFunction(prototype, "valueOf",
Builtins::kStringPrototypeValueOf, 0, true);
Handle<JSFunction> iterator = SimpleCreateFunction(
isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
Builtins::kStringPrototypeIterator, 0, true);
- iterator->shared()->set_native(true);
iterator->shared()->set_builtin_function_id(kStringIterator);
JSObject::AddProperty(prototype, factory->iterator_symbol(), iterator,
static_cast<PropertyAttributes>(DONT_ENUM));
@@ -1641,6 +1714,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate, factory->NewStringFromAsciiChecked("StringIterator"),
JS_STRING_ITERATOR_TYPE, JSStringIterator::kSize,
string_iterator_prototype, Builtins::kIllegal);
+ string_iterator_function->shared()->set_native(false);
native_context()->set_string_iterator_map(
string_iterator_function->initial_map());
}
@@ -1923,6 +1997,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kPromiseInternalConstructor, 1, true);
+ function->shared()->set_native(false);
InstallWithIntrinsicDefaultProto(
isolate, function, Context::PROMISE_INTERNAL_CONSTRUCTOR_INDEX);
}
@@ -1934,18 +2009,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::IS_PROMISE_INDEX);
}
- { // Internal: PerformPromiseThen
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kPerformPromiseThen, 4, false);
- InstallWithIntrinsicDefaultProto(isolate, function,
- Context::PERFORM_PROMISE_THEN_INDEX);
- }
-
{ // Internal: ResolvePromise
// Also exposed as extrasUtils.resolvePromise.
Handle<JSFunction> function = SimpleCreateFunction(
isolate, factory->empty_string(), Builtins::kResolvePromise, 2, true);
+ function->shared()->set_native(false);
InstallWithIntrinsicDefaultProto(isolate, function,
Context::PROMISE_RESOLVE_INDEX);
}
@@ -1975,6 +2043,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kInternalPromiseReject, 3, true);
+ function->shared()->set_native(false);
InstallWithIntrinsicDefaultProto(isolate, function,
Context::PROMISE_INTERNAL_REJECT_INDEX);
}
@@ -2191,6 +2260,15 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_regexp_last_match_info(*last_match_info);
Handle<RegExpMatchInfo> internal_match_info = factory->NewRegExpMatchInfo();
native_context()->set_regexp_internal_match_info(*internal_match_info);
+
+ // Force the RegExp constructor to fast properties, so that we can use the
+ // fast paths for various things like
+ //
+ // x instanceof RegExp
+ //
+ // etc. We should probably come up with a more principled approach once
+ // the JavaScript builtins are gone.
+ JSObject::MigrateSlowToFast(regexp_fun, 0, "Bootstrapping");
}
{ // -- E r r o r
@@ -2441,6 +2519,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
CreateFunction(isolate, factory->InternalizeUtf8String("TypedArray"),
JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, prototype,
Builtins::kIllegal);
+ typed_array_fun->shared()->set_native(false);
InstallSpeciesGetter(typed_array_fun);
// Install the "constructor" property on the {prototype}.
@@ -2479,6 +2558,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
values->shared()->set_builtin_function_id(kTypedArrayValues);
JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
DONT_ENUM);
+
+ // TODO(caitp): alphasort accessors/methods
+ SimpleInstallFunction(prototype, "copyWithin",
+ Builtins::kTypedArrayPrototypeCopyWithin, 2, false);
}
{ // -- T y p e d A r r a y s
@@ -2909,6 +2992,8 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
+
+ InitializeGlobal_enable_fast_array_builtins();
}
@@ -2979,7 +3064,7 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
// environment has been at least partially initialized. Add a stack check
// before entering JS code to catch overflow early.
StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(1 * KB)) {
+ if (check.JsHasOverflowed(4 * KB)) {
isolate->StackOverflow();
return false;
}
@@ -2991,8 +3076,7 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<SharedFunctionInfo> function_info =
Compiler::GetSharedFunctionInfoForScript(
source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
- context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
- false);
+ context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag);
if (function_info.is_null()) return false;
DCHECK(context->IsNativeContext());
@@ -3055,7 +3139,7 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
function_info = Compiler::GetSharedFunctionInfoForScript(
source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
context, extension, NULL, ScriptCompiler::kNoCompileOptions,
- EXTENSION_CODE, false);
+ EXTENSION_CODE);
if (function_info.is_null()) return false;
cache->Add(name, function_info);
}
@@ -3131,6 +3215,8 @@ void Genesis::ConfigureUtilsObject(GlobalContextType context_type) {
// The utils object can be removed for cases that reach this point.
native_context()->set_natives_utils_object(heap()->undefined_value());
+ native_context()->set_extras_utils_object(heap()->undefined_value());
+ native_context()->set_exports_container(heap()->undefined_value());
}
@@ -3350,41 +3436,85 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
script_source_mapping_url, attribs);
script_map->AppendDescriptor(&d);
}
+ }
+
+ { // -- A s y n c F u n c t i o n
+ // Builtin functions for AsyncFunction.
+ PrototypeIterator iter(native_context->async_function_map());
+ Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
+
+ static const bool kUseStrictFunctionMap = true;
+ Handle<JSFunction> async_function_constructor = InstallFunction(
+ container, "AsyncFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
+ async_function_prototype, Builtins::kAsyncFunctionConstructor,
+ kUseStrictFunctionMap);
+ async_function_constructor->shared()->DontAdaptArguments();
+ async_function_constructor->shared()->SetConstructStub(
+ *isolate->builtins()->AsyncFunctionConstructor());
+ async_function_constructor->shared()->set_length(1);
+ InstallWithIntrinsicDefaultProto(isolate, async_function_constructor,
+ Context::ASYNC_FUNCTION_FUNCTION_INDEX);
+ JSObject::ForceSetPrototype(async_function_constructor,
+ isolate->function_function());
+
+ JSObject::AddProperty(
+ async_function_prototype, factory->constructor_string(),
+ async_function_constructor,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ JSFunction::SetPrototype(async_function_constructor,
+ async_function_prototype);
+
+ {
+ Handle<JSFunction> function =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kAsyncFunctionAwaitCaught, 3, false);
+ InstallWithIntrinsicDefaultProto(
+ isolate, function, Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX);
+ }
+
+ {
+ Handle<JSFunction> function =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kAsyncFunctionAwaitUncaught, 3, false);
+ InstallWithIntrinsicDefaultProto(
+ isolate, function, Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
+ }
+
+ {
+ Handle<Code> code =
+ isolate->builtins()->AsyncFunctionAwaitRejectClosure();
+ Handle<SharedFunctionInfo> info =
+ factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(1);
+ info->set_length(1);
+ native_context->set_async_function_await_reject_shared_fun(*info);
+ }
+
+ {
+ Handle<Code> code =
+ isolate->builtins()->AsyncFunctionAwaitResolveClosure();
+ Handle<SharedFunctionInfo> info =
+ factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(1);
+ info->set_length(1);
+ native_context->set_async_function_await_resolve_shared_fun(*info);
+ }
+
+ {
+ Handle<JSFunction> function =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kAsyncFunctionPromiseCreate, 0, false);
+ InstallWithIntrinsicDefaultProto(
+ isolate, function, Context::ASYNC_FUNCTION_PROMISE_CREATE_INDEX);
+ }
{
- PrototypeIterator iter(native_context->async_function_map());
- Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
-
- static const bool kUseStrictFunctionMap = true;
- Handle<JSFunction> async_function_constructor = InstallFunction(
- container, "AsyncFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
- async_function_prototype, Builtins::kAsyncFunctionConstructor,
- kUseStrictFunctionMap);
- async_function_constructor->shared()->DontAdaptArguments();
- async_function_constructor->shared()->SetConstructStub(
- *isolate->builtins()->AsyncFunctionConstructor());
- async_function_constructor->shared()->set_length(1);
- InstallWithIntrinsicDefaultProto(isolate, async_function_constructor,
- Context::ASYNC_FUNCTION_FUNCTION_INDEX);
- JSObject::ForceSetPrototype(async_function_constructor,
- isolate->function_function());
-
- JSObject::AddProperty(
- async_function_prototype, factory->constructor_string(),
- async_function_constructor,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
-
- JSFunction::SetPrototype(async_function_constructor,
- async_function_prototype);
-
- Handle<JSFunction> async_function_next =
- SimpleInstallFunction(container, "AsyncFunctionNext",
- Builtins::kGeneratorPrototypeNext, 1, true);
- Handle<JSFunction> async_function_throw =
- SimpleInstallFunction(container, "AsyncFunctionThrow",
- Builtins::kGeneratorPrototypeThrow, 1, true);
- async_function_next->shared()->set_native(false);
- async_function_throw->shared()->set_native(false);
+ Handle<JSFunction> function = SimpleCreateFunction(
+ isolate, factory->empty_string(),
+ Builtins::kAsyncFunctionPromiseRelease, 1, false);
+ InstallWithIntrinsicDefaultProto(
+ isolate, function, Context::ASYNC_FUNCTION_PROMISE_RELEASE_INDEX);
}
}
@@ -3442,24 +3572,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Accessors::FunctionSetPrototype(callsite_fun, proto).Assert();
}
}
-}
-
-
-void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
- Handle<JSObject> container) {
- HandleScope scope(isolate);
-
-#ifdef V8_I18N_SUPPORT
-#define INITIALIZE_FLAG(FLAG) \
- { \
- Handle<String> name = \
- isolate->factory()->NewStringFromAsciiChecked(#FLAG); \
- JSObject::AddProperty(container, name, \
- isolate->factory()->ToBoolean(FLAG), NONE); \
- }
-
-#undef INITIALIZE_FLAG
-#endif
+ isolate->native_context()->set_exports_container(*container);
}
@@ -3472,14 +3585,13 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
-#ifdef V8_I18N_SUPPORT
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(datetime_format_to_parts)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(icu_case_mapping)
-#endif
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_generators)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_trailing_commas)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_spread)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_rest_spread)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_template_escapes)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -3494,6 +3606,31 @@ void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
JSObject::AddProperty(symbol, name_string, value, attributes);
}
+void Genesis::InitializeGlobal_enable_fast_array_builtins() {
+ if (!FLAG_enable_fast_array_builtins) return;
+
+ Handle<JSGlobalObject> global(native_context()->global_object());
+ Isolate* isolate = global->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ LookupIterator it1(global, factory->NewStringFromAsciiChecked("Array"),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Handle<Object> array_object = Object::GetProperty(&it1).ToHandleChecked();
+ LookupIterator it2(array_object,
+ factory->NewStringFromAsciiChecked("prototype"),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Handle<Object> array_prototype = Object::GetProperty(&it2).ToHandleChecked();
+ LookupIterator it3(array_prototype,
+ factory->NewStringFromAsciiChecked("forEach"),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Handle<Object> for_each_function =
+ Object::GetProperty(&it3).ToHandleChecked();
+ Handle<JSFunction>::cast(for_each_function)
+ ->set_code(isolate->builtins()->builtin(Builtins::kArrayForEach));
+ Handle<JSFunction>::cast(for_each_function)
+ ->shared()
+ ->set_code(isolate->builtins()->builtin(Builtins::kArrayForEach));
+}
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
@@ -3523,38 +3660,6 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
Builtins::kAtomicsStore, 3, true);
}
-
-void Genesis::InitializeGlobal_harmony_simd() {
- if (!FLAG_harmony_simd) return;
-
- Handle<JSGlobalObject> global(
- JSGlobalObject::cast(native_context()->global_object()));
- Isolate* isolate = global->GetIsolate();
- Factory* factory = isolate->factory();
-
- Handle<String> name = factory->InternalizeUtf8String("SIMD");
- Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetInstancePrototype(
- cons,
- Handle<Object>(native_context()->initial_object_prototype(), isolate));
- cons->shared()->set_instance_class_name(*name);
- Handle<JSObject> simd_object = factory->NewJSObject(cons, TENURED);
- DCHECK(simd_object->IsJSObject());
- JSObject::AddProperty(global, name, simd_object, DONT_ENUM);
-
-// Install SIMD type functions. Set the instance class names since
-// InstallFunction only does this when we install on the JSGlobalObject.
-#define SIMD128_INSTALL_FUNCTION(TYPE, Type, type, lane_count, lane_type) \
- Handle<JSFunction> type##_function = InstallFunction( \
- simd_object, #Type, JS_VALUE_TYPE, JSValue::kSize, \
- isolate->initial_object_prototype(), Builtins::kIllegal); \
- native_context()->set_##type##_function(*type##_function); \
- type##_function->shared()->set_instance_class_name(*factory->Type##_string());
- SIMD128_TYPES(SIMD128_INSTALL_FUNCTION)
-#undef SIMD128_INSTALL_FUNCTION
-}
-
-
void Genesis::InitializeGlobal_harmony_array_prototype_values() {
if (!FLAG_harmony_array_prototype_values) return;
Handle<JSFunction> array_constructor(native_context()->array_function());
@@ -3576,6 +3681,143 @@ void Genesis::InitializeGlobal_harmony_array_prototype_values() {
NONE);
}
+void Genesis::InitializeGlobal_harmony_async_iteration() {
+ if (!FLAG_harmony_async_iteration) return;
+ Handle<JSFunction> symbol_fun(native_context()->symbol_function());
+ InstallConstant(isolate(), symbol_fun, "asyncIterator",
+ factory()->async_iterator_symbol());
+}
+
+void Genesis::InitializeGlobal_harmony_promise_finally() {
+ if (!FLAG_harmony_promise_finally) return;
+
+ Handle<JSFunction> constructor(native_context()->promise_function());
+ Handle<JSObject> prototype(JSObject::cast(constructor->instance_prototype()));
+ SimpleInstallFunction(prototype, "finally", Builtins::kPromiseFinally, 1,
+ true, DONT_ENUM);
+
+ // The promise prototype map has changed because we added a property to
+ // the prototype, so we update the saved map.
+ Handle<Map> prototype_map(prototype->map());
+ Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate());
+ native_context()->set_promise_prototype_map(*prototype_map);
+
+ {
+ Handle<Code> code =
+ handle(isolate()->builtins()->builtin(Builtins::kPromiseThenFinally),
+ isolate());
+ Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
+ factory()->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(1);
+ info->set_length(1);
+ info->set_native(true);
+ native_context()->set_promise_then_finally_shared_fun(*info);
+ }
+
+ {
+ Handle<Code> code =
+ handle(isolate()->builtins()->builtin(Builtins::kPromiseCatchFinally),
+ isolate());
+ Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
+ factory()->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(1);
+ info->set_length(1);
+ info->set_native(true);
+ native_context()->set_promise_catch_finally_shared_fun(*info);
+ }
+
+ {
+ Handle<Code> code = handle(
+ isolate()->builtins()->builtin(Builtins::kPromiseValueThunkFinally),
+ isolate());
+ Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
+ factory()->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(0);
+ info->set_length(0);
+ native_context()->set_promise_value_thunk_finally_shared_fun(*info);
+ }
+
+ {
+ Handle<Code> code =
+ handle(isolate()->builtins()->builtin(Builtins::kPromiseThrowerFinally),
+ isolate());
+ Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
+ factory()->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(0);
+ info->set_length(0);
+ native_context()->set_promise_thrower_finally_shared_fun(*info);
+ }
+}
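
The four SharedFunctionInfos stashed in the native context above back finally's machinery: a then-handler and a catch-handler, plus a value thunk and a thrower so the original resolution value or rejection reason passes through unchanged. A rough TypeScript model of the observable behavior (a sketch in terms of then, not the builtins themselves):

  // Sketch of Promise.prototype.finally in terms of then; illustrative only.
  function finallySketch<T>(p: Promise<T>, onFinally: () => void): Promise<T> {
    return p.then(
        // kPromiseThenFinally role: run onFinally, then replay the value.
        value => Promise.resolve(onFinally()).then(() => value),
        // kPromiseCatchFinally role: run onFinally, then rethrow the reason.
        reason => Promise.resolve(onFinally()).then(() => { throw reason; }));
  }
  finallySketch(Promise.resolve(42), () => console.log('cleanup'))
      .then(v => console.log(v)); // "cleanup", then 42
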
+
+#ifdef V8_I18N_SUPPORT
+void Genesis::InitializeGlobal_datetime_format_to_parts() {
+ if (!FLAG_datetime_format_to_parts) return;
+ Handle<JSReceiver> exports_container(
+ JSReceiver::cast(native_context()->exports_container()));
+ Handle<JSObject> date_time_format_prototype(JSObject::cast(
+ native_context()->intl_date_time_format_function()->prototype()));
+ Handle<JSFunction> format_date_to_parts = Handle<JSFunction>::cast(
+ JSReceiver::GetProperty(
+ exports_container,
+ factory()->InternalizeUtf8String("FormatDateToParts"))
+ .ToHandleChecked());
+ InstallFunction(date_time_format_prototype, format_date_to_parts,
+ factory()->InternalizeUtf8String("formatToParts"));
+}
+
+namespace {
+
+void SetFunction(Handle<JSObject> target, Handle<JSFunction> function,
+ Handle<Name> name, PropertyAttributes attributes = DONT_ENUM) {
+ JSObject::SetOwnPropertyIgnoreAttributes(target, name, function, attributes)
+ .ToHandleChecked();
+}
+
+} // namespace
+
+void Genesis::InitializeGlobal_icu_case_mapping() {
+ if (!FLAG_icu_case_mapping) return;
+
+ Handle<JSReceiver> exports_container(
+ JSReceiver::cast(native_context()->exports_container()));
+
+ Handle<JSObject> string_prototype(
+ JSObject::cast(native_context()->string_function()->prototype()));
+
+ Handle<JSFunction> to_lower_case = Handle<JSFunction>::cast(
+ JSReceiver::GetProperty(
+ exports_container,
+ factory()->InternalizeUtf8String("ToLowerCaseI18N"))
+ .ToHandleChecked());
+ SetFunction(string_prototype, to_lower_case,
+ factory()->InternalizeUtf8String("toLowerCase"));
+
+ Handle<JSFunction> to_upper_case = Handle<JSFunction>::cast(
+ JSReceiver::GetProperty(
+ exports_container,
+ factory()->InternalizeUtf8String("ToUpperCaseI18N"))
+ .ToHandleChecked());
+ SetFunction(string_prototype, to_upper_case,
+ factory()->InternalizeUtf8String("toUpperCase"));
+
+ Handle<JSFunction> to_locale_lower_case = Handle<JSFunction>::cast(
+ JSReceiver::GetProperty(
+ exports_container,
+ factory()->InternalizeUtf8String("ToLocaleLowerCaseI18N"))
+ .ToHandleChecked());
+ SetFunction(string_prototype, to_locale_lower_case,
+ factory()->InternalizeUtf8String("toLocaleLowerCase"));
+
+ Handle<JSFunction> to_locale_upper_case = Handle<JSFunction>::cast(
+ JSReceiver::GetProperty(
+ exports_container,
+ factory()->InternalizeUtf8String("ToLocaleUpperCaseI18N"))
+ .ToHandleChecked());
+ SetFunction(string_prototype, to_locale_upper_case,
+ factory()->InternalizeUtf8String("toLocaleUpperCase"));
+}
+#endif
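
With ICU-backed case mapping the JS natives are gone and the ToLowerCaseI18N/ToUpperCaseI18N exports take over directly. A small TypeScript example of the behavior this is meant to preserve, locale-sensitive mappings included (actual output depends on the ICU data available at runtime):

  console.log('İstanbul'.toLowerCase());    // 'i̇stanbul' (full Unicode mapping)
  console.log('i'.toLocaleUpperCase('tr')); // 'İ' — Turkish dotted capital I
  console.log('i'.toLocaleUpperCase('en')); // 'I'
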
+
Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
const char* name,
Builtins::Name call,
@@ -3748,10 +3990,11 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Store the map for the %StringPrototype% after the natives have been
// compiled and the String function has been set up.
Handle<JSFunction> string_function(native_context()->string_function());
- DCHECK(JSObject::cast(
- string_function->initial_map()->prototype())->HasFastProperties());
+ JSObject* string_function_prototype =
+ JSObject::cast(string_function->initial_map()->prototype());
+ DCHECK(string_function_prototype->HasFastProperties());
native_context()->set_string_function_prototype_map(
- HeapObject::cast(string_function->initial_map()->prototype())->map());
+ string_function_prototype->map());
Handle<JSGlobalObject> global_object =
handle(native_context()->global_object());
@@ -4023,8 +4266,6 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_tailcalls_natives[] = {nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
"native harmony-atomics.js", NULL};
- static const char* harmony_simd_natives[] = {"native harmony-simd.js",
- nullptr};
static const char* harmony_do_expressions_natives[] = {nullptr};
static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
static const char* harmony_regexp_named_captures_natives[] = {nullptr};
@@ -4032,15 +4273,18 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_function_sent_natives[] = {nullptr};
static const char* harmony_array_prototype_values_natives[] = {nullptr};
#ifdef V8_I18N_SUPPORT
- static const char* icu_case_mapping_natives[] = {"native icu-case-mapping.js",
- nullptr};
- static const char* datetime_format_to_parts_natives[] = {
- "native datetime-format-to-parts.js", nullptr};
+ static const char* icu_case_mapping_natives[] = {nullptr};
+ static const char* datetime_format_to_parts_natives[] = {nullptr};
#endif
static const char* harmony_restrictive_generators_natives[] = {nullptr};
static const char* harmony_trailing_commas_natives[] = {nullptr};
+ static const char* harmony_function_tostring_natives[] = {nullptr};
static const char* harmony_class_fields_natives[] = {nullptr};
- static const char* harmony_object_spread_natives[] = {nullptr};
+ static const char* harmony_object_rest_spread_natives[] = {nullptr};
+ static const char* harmony_async_iteration_natives[] = {nullptr};
+ static const char* harmony_dynamic_import_natives[] = {nullptr};
+ static const char* harmony_promise_finally_natives[] = {nullptr};
+ static const char* harmony_template_escapes_natives[] = {nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -4158,7 +4402,6 @@ void Genesis::InstallExperimentalBuiltinFunctionIds() {
}
}
-
#undef INSTALL_BUILTIN_ID
@@ -4196,25 +4439,7 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
WasmJs::Install(isolate);
}
- // Expose the debug global object in global if a name for it is specified.
- if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
- // If loading fails we just bail out without installing the
- // debugger but without tanking the whole context.
- Debug* debug = isolate->debug();
- if (!debug->Load()) return true;
- Handle<Context> debug_context = debug->debug_context();
- // Set the security token for the debugger context to the same as
- // the shell native context to allow calling between these (otherwise
- // exposing debug global object doesn't make much sense).
- debug_context->set_security_token(native_context->security_token());
- Handle<String> debug_string =
- factory->InternalizeUtf8String(FLAG_expose_debug_as);
- uint32_t index;
- if (debug_string->AsArrayIndex(&index)) return true;
- Handle<Object> global_proxy(debug_context->global_proxy(), isolate);
- JSObject::AddProperty(handle(native_context->global_proxy()), debug_string,
- global_proxy, DONT_ENUM);
- }
+ InstallFFIMap(isolate);
return true;
}
@@ -4433,6 +4658,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
} else {
DCHECK_EQ(kDescriptor, details.location());
if (details.kind() == kData) {
+ DCHECK(!FLAG_track_constant_fields);
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
Handle<Object> value(descs->GetValue(i), isolate());
@@ -4663,6 +4889,7 @@ Genesis::Genesis(
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
CreateStrictModeFunctionMaps(empty_function);
CreateIteratorMaps(empty_function);
+ CreateAsyncIteratorMaps();
CreateAsyncFunctionMaps(empty_function);
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
@@ -4690,6 +4917,15 @@ Genesis::Genesis(
if (FLAG_experimental_extras) {
if (!InstallExperimentalExtraNatives()) return;
}
+
+ // Store String.prototype's map again in case it has been changed by
+ // experimental natives.
+ Handle<JSFunction> string_function(native_context()->string_function());
+ JSObject* string_function_prototype =
+ JSObject::cast(string_function->initial_map()->prototype());
+ DCHECK(string_function_prototype->HasFastProperties());
+ native_context()->set_string_function_prototype_map(
+ string_function_prototype->map());
}
// The serializer cannot serialize typed arrays. Reset those typed arrays
// for each new context.
@@ -4738,11 +4974,19 @@ Genesis::Genesis(Isolate* isolate,
global_proxy = factory()->NewUninitializedJSGlobalProxy(proxy_size);
}
- // CreateNewGlobals.
+ // Create a remote object as the global object.
Handle<ObjectTemplateInfo> global_proxy_data =
- v8::Utils::OpenHandle(*global_proxy_template);
+ Utils::OpenHandle(*global_proxy_template);
Handle<FunctionTemplateInfo> global_constructor(
FunctionTemplateInfo::cast(global_proxy_data->constructor()));
+
+ Handle<ObjectTemplateInfo> global_object_template(
+ ObjectTemplateInfo::cast(global_constructor->prototype_template()));
+ Handle<JSObject> global_object =
+ ApiNatives::InstantiateRemoteObject(
+ global_object_template).ToHandleChecked();
+
+ // (Re)initialize the global proxy object.
Handle<SharedFunctionInfo> shared =
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate,
global_constructor);
@@ -4758,19 +5002,20 @@ Genesis::Genesis(Isolate* isolate,
JSFunction::SetInitialMap(global_proxy_function, global_proxy_map,
factory()->null_value());
global_proxy_map->set_is_access_check_needed(true);
- global_proxy_map->set_is_callable();
- global_proxy_map->set_is_constructor(true);
global_proxy_map->set_has_hidden_prototype(true);
Handle<String> global_name = factory()->global_string();
global_proxy_function->shared()->set_instance_class_name(*global_name);
factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
- // GlobalProxy.
+ // A remote global proxy has no native context.
global_proxy->set_native_context(heap()->null_value());
- // DetachGlobal.
- JSObject::ForceSetPrototype(global_proxy, factory()->null_value());
+ // Configure the hidden prototype chain of the global proxy.
+ JSObject::ForceSetPrototype(global_proxy, global_object);
+ // TODO(dcheng): This is a hack. Why does this need to be manually called
+ // here? Line 4812 should have taken care of it?
+ global_proxy->map()->set_has_hidden_prototype(true);
global_proxy_ = global_proxy;
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index a1ba9dd713..81ef396e06 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -121,8 +121,6 @@ class Bootstrapper final {
static bool CompileExperimentalExtraBuiltin(Isolate* isolate, int index);
static void ExportFromRuntime(Isolate* isolate, Handle<JSObject> container);
- static void ExportExperimentalFromRuntime(Isolate* isolate,
- Handle<JSObject> container);
private:
Isolate* isolate_;
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 02103b3e8f..2d03783d11 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -552,6 +552,8 @@ namespace {
void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
+ Label post_instantiation_deopt_entry;
+
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
@@ -601,6 +603,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ PushRoot(Heap::kTheHoleValueRootIndex);
}
+ // Deoptimizer re-enters stub code here.
+ __ bind(&post_instantiation_deopt_entry);
+
// Set up pointer to last argument.
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -633,7 +638,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
}
// Restore context from the frame.
@@ -697,6 +703,35 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
}
__ Jump(lr);
+
+ // Store offset of trampoline address for deoptimizer. This is the bailout
+ // point after the receiver instantiation but before the function invocation.
+ // We need to restore some registers in order to continue the above code.
+ if (create_implicit_receiver && !is_api_function) {
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+
+ // ----------- S t a t e -------------
+ // -- r0 : newly allocated object
+ // -- sp[0] : constructor function
+ // -----------------------------------
+
+ __ pop(r1);
+ __ push(r0);
+ __ push(r0);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(r0);
+
+ // Retrieve the new target value from the stack. This was placed into the
+ // frame description in place of the receiver by the optimizing compiler.
+ __ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2));
+
+ // Continue with constructor function invocation.
+ __ b(&post_instantiation_deopt_entry);
+ }
}
} // namespace
@@ -1002,7 +1037,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register debug_info = kInterpreterBytecodeArrayRegister;
DCHECK(!debug_info.is(r0));
__ ldr(debug_info, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(debug_info, Operand(DebugInfo::uninitialized()));
+ __ SmiTst(debug_info);
// Load original bytecode array or the debug copy.
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset), eq);
@@ -1016,8 +1051,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
- __ ldr(r2, FieldMemOperand(r2, LiteralsArray::kFeedbackVectorOffset));
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
+ __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
__ ldr(r9, FieldMemOperand(
r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
@@ -1148,7 +1183,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
- CallableType function_type) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r2 : the address of the first argument to be pushed. Subsequent
@@ -1164,12 +1199,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, r3, r2, r4, r5, &stack_overflow);
// Call the target.
- if (function_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@@ -1185,7 +1222,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
- MacroAssembler* masm, CallableType construct_type) {
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r0 : argument count (not including receiver)
// -- r3 : new target
@@ -1203,7 +1240,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
Generate_InterpreterPushArgs(masm, r0, r4, r5, r6, &stack_overflow);
__ AssertUndefinedOrAllocationSite(r2, r5);
- if (construct_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r1);
// Tail call to the function-specific construct stub (still in the caller
@@ -1212,9 +1249,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
// Jump to the construct function.
__ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with r0, r1, and r3 unmodified.
+ __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(construct_type, CallableType::kAny);
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with r0, r1, and r3 unmodified.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -1336,20 +1376,26 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register argument_count = r0;
Register closure = r1;
Register new_target = r3;
+ Register map = argument_count;
+ Register index = r2;
+
+ // Do we have a valid feedback vector?
+ __ ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ ldr(index, FieldMemOperand(index, Cell::kValueOffset));
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime_no_stack);
+
__ push(argument_count);
__ push(new_target);
__ push(closure);
- Register map = argument_count;
- Register index = r2;
__ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(map,
FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
__ cmp(index, Operand(Smi::FromInt(2)));
- __ b(lt, &gotta_call_runtime);
+ __ b(lt, &try_shared);
- // Find literals.
// r3 : native context
// r2 : length / index
// r0 : optimized code map
@@ -1369,20 +1415,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ b(ne, &loop_bottom);
- // Literals available?
- __ ldr(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousLiterals));
- __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ JumpIfSmi(temp, &gotta_call_runtime);
-
- // Save the literals in the closure.
- __ ldr(r4, MemOperand(sp, 0));
- __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
- __ push(index);
- __ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(index);
// Code available?
Register entry = r4;
@@ -1392,7 +1424,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found literals and code. Get them into the closure and return.
+ // Found code. Get it into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1427,9 +1459,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ cmp(index, Operand(Smi::FromInt(1)));
__ b(gt, &loop_top);
- // We found neither literals nor code.
- __ jmp(&gotta_call_runtime);
-
+ // We found no code.
__ bind(&try_shared);
__ pop(closure);
__ pop(new_target);
@@ -2063,20 +2093,20 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&target_not_constructor);
{
__ str(r1, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
__ str(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
- __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
@@ -2244,6 +2274,72 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- r1 : the target to call (can be any Object)
+ // -- r2 : start index (to support rest parameters)
+ // -- lr : return address.
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ cmp(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &arguments_adaptor);
+ {
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r0, FieldMemOperand(
+ r0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(r3, fp);
+ }
+ __ b(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ // Load the length from the ArgumentsAdaptorFrame.
+ __ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ }
+ __ bind(&arguments_done);
+
+ Label stack_empty, stack_done, stack_overflow;
+ __ SmiUntag(r0);
+ __ sub(r0, r0, r2, SetCC);
+ __ b(le, &stack_empty);
+ {
+ // Check for stack overflow.
+ Generate_StackOverflowCheck(masm, r0, r2, &stack_overflow);
+
+ // Forward the arguments from the caller frame.
+ {
+ Label loop;
+ __ add(r3, r3, Operand(kPointerSize));
+ __ mov(r2, r0);
+ __ bind(&loop);
+ {
+ __ ldr(ip, MemOperand(r3, r2, LSL, kPointerSizeLog2));
+ __ push(ip);
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(ne, &loop);
+ }
+ }
+ }
+ __ b(&stack_done);
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&stack_empty);
+ {
+ // We just pass the receiver, which is already on the stack.
+ __ mov(r0, Operand(0));
+ }
+ __ bind(&stack_done);
+
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
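
CallForwardVarargs re-pushes the caller frame's actual arguments starting at a given index; per the state comment above, one use is forwarding rest parameters without first materializing an array. A behavioral sketch in TypeScript, where the start index corresponds to where the rest parameter begins:

  function sum(...xs: number[]): number {
    return xs.reduce((a, b) => a + b, 0);
  }
  function forward(tag: string, ...rest: number[]): number {
    // Conceptually, the builtin copies the caller frame's arguments from
    // index 1 onward (past 'tag') straight onto the stack for 'sum'.
    return sum(...rest);
  }
  console.log(forward('t', 1, 2, 3)); // 6
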
+
namespace {
// Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2294,7 +2390,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
Label no_interpreter_frame;
__ ldr(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ cmp(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ b(ne, &no_interpreter_frame);
__ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
@@ -2306,7 +2402,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(scratch3,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(scratch3,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &no_arguments_adaptor);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -2614,6 +2711,161 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+ Register argc = r0;
+ Register constructor = r1;
+ Register new_target = r3;
+
+ Register scratch = r2;
+ Register scratch2 = r6;
+
+ Register spread = r4;
+ Register spread_map = r5;
+
+ Register spread_len = r5;
+
+ Label runtime_call, push_args;
+ __ ldr(spread, MemOperand(sp, 0));
+ __ JumpIfSmi(spread, &runtime_call);
+ __ ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+
+ // Check that the spread is an array.
+ __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
+ __ b(ne, &runtime_call);
+
+ // Check that we have the original ArrayPrototype.
+ __ ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+ __ ldr(scratch2, NativeContextMemOperand());
+ __ ldr(scratch2,
+ ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ cmp(scratch, scratch2);
+ __ b(ne, &runtime_call);
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+ __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
+ __ b(ne, &runtime_call);
+
+ // Check that the map of the initial array iterator hasn't changed.
+ __ ldr(scratch2, NativeContextMemOperand());
+ __ ldr(scratch,
+ ContextMemOperand(scratch2,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ ldr(scratch2,
+ ContextMemOperand(
+ scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ __ cmp(scratch, scratch2);
+ __ b(ne, &runtime_call);
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ Label no_protector_check;
+ __ ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(scratch);
+ __ cmp(scratch, Operand(FAST_HOLEY_ELEMENTS));
+ __ b(hi, &runtime_call);
+ // For non-FastHoley kinds, we can skip the protector check.
+ __ cmp(scratch, Operand(FAST_SMI_ELEMENTS));
+ __ b(eq, &no_protector_check);
+ __ cmp(scratch, Operand(FAST_ELEMENTS));
+ __ b(eq, &no_protector_check);
+ // Check the ArrayProtector cell.
+ __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+ __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
+ __ b(ne, &runtime_call);
+
+ __ bind(&no_protector_check);
+ // Load the FixedArray backing store, but use the length from the array.
+ __ ldr(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+ __ ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+ __ b(&push_args);
+
+ __ bind(&runtime_call);
+ {
+ // Call the builtin for the result of the spread.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(argc);
+ __ Push(constructor);
+ __ Push(new_target);
+ __ Push(argc);
+ __ Push(spread);
+ __ CallRuntime(Runtime::kSpreadIterableFixed);
+ __ mov(spread, r0);
+ __ Pop(argc);
+ __ Pop(new_target);
+ __ Pop(constructor);
+ __ SmiUntag(argc);
+ }
+
+ {
+ // Calculate the new nargs including the result of the spread.
+ __ ldr(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+
+ __ bind(&push_args);
+ // argc += spread_len - 1. Subtract 1 for the spread itself.
+ __ add(argc, argc, spread_len);
+ __ sub(argc, argc, Operand(1));
+
+ // Pop the spread argument off the stack.
+ __ Pop(scratch);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already have
+ // overflowed here, which will cause scratch to become negative.
+ __ sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
+ __ b(gt, &done); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // Put the evaluated spread onto the stack as additional arguments.
+ {
+ __ mov(scratch, Operand(0));
+ Label done, push, loop;
+ __ bind(&loop);
+ __ cmp(scratch, spread_len);
+ __ b(eq, &done);
+ __ add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
+ __ ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+ __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+ __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(scratch2);
+ __ add(scratch, scratch, Operand(1));
+ __ b(&loop);
+ __ bind(&done);
+ }
+}
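
Taken together, the checks above keep a spread on the fast path only for a plain fast-elements array whose iteration cannot be observed: original ArrayPrototype, untouched array-iterator machinery, and, for holey kinds, an intact array protector. A TypeScript illustration of what is expected to flip a call into the runtime_call branch:

  const xs = [1, 2, 3];         // fast packed elements
  console.log(Math.max(...xs)); // fast path: same result as indexed access

  // Giving the array its own iterator makes iteration observable, so the
  // protector/map checks should fail and the generic path must run it:
  (xs as any)[Symbol.iterator] = function* () { yield 7; };
  console.log(Math.max(...xs)); // 7 — the custom iterator is honored
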
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the target to call (can be any Object)
+ // -----------------------------------
+
+ // CheckSpreadAndPushToStack will push r3 to save it.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow),
+ RelocInfo::CODE_TARGET);
+}
+
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2729,6 +2981,19 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the constructor to call (can be any Object)
+ // -- r3 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
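
Construct-with-spread shares the same stack preparation and then tail-calls Construct, so the new target flows through unmodified in r3. In source-level terms:

  class Point {
    constructor(public x: number, public y: number) {}
  }
  const args: [number, number] = [1, 2];
  const p = new Point(...args); // construct with spread; new.target === Point
  console.log(p.x + p.y);       // 3
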
+
+// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (untagged)
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index f22dc612dc..74e6c70133 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -540,6 +540,8 @@ namespace {
void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
+ Label post_instantiation_deopt_entry;
+
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
@@ -597,6 +599,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ PushRoot(Heap::kTheHoleValueRootIndex);
}
+ // Deoptimizer re-enters stub code here.
+ __ Bind(&post_instantiation_deopt_entry);
+
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@@ -635,7 +640,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
}
// Restore the context from the frame.
@@ -698,6 +704,34 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
}
__ Ret();
+
+ // Store offset of trampoline address for deoptimizer. This is the bailout
+ // point after the receiver instantiation but before the function invocation.
+ // We need to restore some registers in order to continue the above code.
+ if (create_implicit_receiver && !is_api_function) {
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+
+ // ----------- S t a t e -------------
+ // -- x0 : newly allocated object
+ // -- sp[0] : constructor function
+ // -----------------------------------
+
+ __ Pop(x1);
+ __ Push(x0, x0);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ Ldr(x0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(x0);
+
+ // Retrieve the new target value from the stack. This was placed into the
+ // frame description in place of the receiver by the optimizing compiler.
+ __ Add(x3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ Ldr(x3, MemOperand(x3, x0, LSL, kPointerSizeLog2));
+
+ // Continue with constructor function invocation.
+ __ B(&post_instantiation_deopt_entry);
+ }
}
} // namespace
@@ -1007,8 +1041,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label load_debug_bytecode_array, bytecode_array_loaded;
DCHECK(!debug_info.is(x0));
__ Ldr(debug_info, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
- __ Cmp(debug_info, Operand(DebugInfo::uninitialized()));
- __ B(ne, &load_debug_bytecode_array);
+ __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
__ Bind(&bytecode_array_loaded);
@@ -1020,11 +1053,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ Ldr(x11, FieldMemOperand(x1, JSFunction::kLiteralsOffset));
- __ Ldr(x11, FieldMemOperand(x11, LiteralsArray::kFeedbackVectorOffset));
- __ Ldr(x10, FieldMemOperand(x11, FeedbackVector::kInvocationCountIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ Ldr(x11, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
+ __ Ldr(x10, FieldMemOperand(
+ x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ Add(x10, x10, Operand(Smi::FromInt(1)));
__ Str(x10, FieldMemOperand(
x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
@@ -1163,7 +1196,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
- CallableType function_type) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x2 : the address of the first argument to be pushed. Subsequent
@@ -1180,12 +1213,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, x3, x2, x4, x5, x6, &stack_overflow);
// Call the target.
- if (function_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@@ -1200,7 +1235,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
- MacroAssembler* masm, CallableType construct_type) {
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- x0 : argument count (not including receiver)
// -- x3 : new target
@@ -1217,7 +1252,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
Generate_InterpreterPushArgs(masm, x0, x4, x5, x6, x7, &stack_overflow);
__ AssertUndefinedOrAllocationSite(x2, x6);
- if (construct_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(x1);
// Tail call to the function-specific construct stub (still in the caller
@@ -1226,8 +1261,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
__ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
__ Br(x4);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with x0, x1, and x3 unmodified.
+ __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(construct_type, CallableType::kAny);
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with x0, x1, and x3 unmodified.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -1346,14 +1385,19 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register closure = x1;
Register map = x13;
Register index = x2;
+
+ // Do we have a valid feedback vector?
+ __ Ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(index, FieldMemOperand(index, Cell::kValueOffset));
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+
__ Ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(map,
FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ Ldrsw(index, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset));
__ Cmp(index, Operand(2));
- __ B(lt, &gotta_call_runtime);
+ __ B(lt, &try_shared);
- // Find literals.
// x3 : native context
// x2 : length / index
// x13 : optimized code map
@@ -1373,17 +1417,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Cmp(temp, native_context);
__ B(ne, &loop_bottom);
- // Literals available?
- __ Ldr(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousLiterals));
- __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ JumpIfSmi(temp, &gotta_call_runtime);
-
- // Save the literals in the closure.
- __ Str(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset));
- __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, x7,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
// Code available?
Register entry = x7;
@@ -1393,7 +1426,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found literals and code. Get them into the closure and return.
+ // Found code. Get it into the closure and return.
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, x5);
@@ -1422,9 +1455,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Cmp(index, Operand(1));
__ B(gt, &loop_top);
- // We found neither literals nor code.
- __ B(&gotta_call_runtime);
-
+ // We found no code.
__ Bind(&try_shared);
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -2117,20 +2148,20 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Bind(&target_not_constructor);
{
__ Poke(target, 0);
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ Bind(&new_target_not_constructor);
{
__ Poke(new_target, 0);
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(x10, x0);
- __ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ Push(lr, fp);
__ Push(x11, x1, x10);
__ Add(fp, jssp,
@@ -2328,6 +2359,72 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- x1 : the target to call (can be any Object)
+ // -- x2 : start index (to support rest parameters)
+ // -- lr : return address.
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ Ldr(x3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(eq, &arguments_adaptor);
+ {
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x0, FieldMemOperand(x0, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(x0, FieldMemOperand(
+ x0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Mov(x3, fp);
+ }
+ __ B(&arguments_done);
+ __ Bind(&arguments_adaptor);
+ {
+ // Just load the length from the ArgumentsAdaptorFrame.
+ __ Ldrsw(x0, UntagSmiMemOperand(
+ x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ }
+ __ Bind(&arguments_done);
+
+ Label stack_empty, stack_done, stack_overflow;
+ __ Subs(x0, x0, x2);
+ __ B(le, &stack_empty);
+ {
+ // Check for stack overflow.
+ Generate_StackOverflowCheck(masm, x0, x2, &stack_overflow);
+
+ // Forward the arguments from the caller frame.
+ {
+ Label loop;
+ __ Add(x3, x3, kPointerSize);
+ __ Mov(x2, x0);
+ __ bind(&loop);
+ {
+ __ Ldr(x4, MemOperand(x3, x2, LSL, kPointerSizeLog2));
+ __ Push(x4);
+ __ Subs(x2, x2, 1);
+ __ B(ne, &loop);
+ }
+ }
+ }
+ __ B(&stack_done);
+ __ Bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ Bind(&stack_empty);
+ {
+ // We just pass the receiver, which is already on the stack.
+ __ Mov(x0, 0);
+ }
+ __ Bind(&stack_done);
+
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
namespace {
// Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2378,7 +2475,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
Label no_interpreter_frame;
__ Ldr(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ Cmp(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ B(ne, &no_interpreter_frame);
__ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
@@ -2390,7 +2487,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(scratch3,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Cmp(scratch3,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ B(ne, &no_arguments_adaptor);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -2693,6 +2791,155 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+ Register argc = x0;
+ Register constructor = x1;
+ Register new_target = x3;
+
+ Register scratch = x2;
+ Register scratch2 = x6;
+
+ Register spread = x4;
+ Register spread_map = x5;
+
+ Register spread_len = x5;
+
+ Label runtime_call, push_args;
+ __ Peek(spread, 0);
+ __ JumpIfSmi(spread, &runtime_call);
+ __ Ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+
+ // Check that the spread is an array.
+ __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
+ __ B(ne, &runtime_call);
+
+ // Check that we have the original ArrayPrototype.
+ __ Ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+ __ Ldr(scratch2, NativeContextMemOperand());
+ __ Ldr(scratch2,
+ ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ Cmp(scratch, scratch2);
+ __ B(ne, &runtime_call);
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+ __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
+ __ B(ne, &runtime_call);
+
+ // Check that the map of the initial array iterator hasn't changed.
+ __ Ldr(scratch2, NativeContextMemOperand());
+ __ Ldr(scratch,
+ ContextMemOperand(scratch2,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ __ Ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ Ldr(scratch2,
+ ContextMemOperand(
+ scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ __ Cmp(scratch, scratch2);
+ __ B(ne, &runtime_call);
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ Label no_protector_check;
+ __ Ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(scratch);
+ __ Cmp(scratch, FAST_HOLEY_ELEMENTS);
+ __ B(hi, &runtime_call);
+ // For non-FastHoley kinds, we can skip the protector check.
+ __ Cmp(scratch, FAST_SMI_ELEMENTS);
+ __ B(eq, &no_protector_check);
+ __ Cmp(scratch, FAST_ELEMENTS);
+ __ B(eq, &no_protector_check);
+ // Check the ArrayProtector cell.
+ __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+ __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
+ __ B(ne, &runtime_call);
+
+ __ Bind(&no_protector_check);
+ // Load the FixedArray backing store, but use the length from the array.
+ __ Ldrsw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
+ __ Ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+ __ B(&push_args);
+
+ __ Bind(&runtime_call);
+ {
+ // Call the builtin for the result of the spread.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(argc);
+ __ Push(constructor, new_target, argc, spread);
+ __ CallRuntime(Runtime::kSpreadIterableFixed);
+ __ Mov(spread, x0);
+ __ Pop(argc, new_target, constructor);
+ __ SmiUntag(argc);
+ }
+
+ {
+ // Calculate the new nargs including the result of the spread.
+ __ Ldrsw(spread_len,
+ UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
+
+ __ Bind(&push_args);
+ // argc += spread_len - 1. Subtract 1 for the spread itself.
+ __ Add(argc, argc, spread_len);
+ __ Sub(argc, argc, 1);
+
+ // Pop the spread argument off the stack.
+ __ Pop(scratch);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already have
+ // overflowed here, which will cause scratch to become negative.
+ __ Sub(scratch, masm->StackPointer(), scratch);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
+ __ B(gt, &done); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ Bind(&done);
+ }
+
+ // Put the evaluated spread onto the stack as additional arguments.
+ {
+ __ Mov(scratch, 0);
+ Label done, push, loop;
+ __ Bind(&loop);
+ __ Cmp(scratch, spread_len);
+ __ B(eq, &done);
+ __ Add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
+ __ Ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+ __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+ __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(scratch2);
+ __ Add(scratch, scratch, Operand(1));
+ __ B(&loop);
+ __ Bind(&done);
+ }
+}
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the target to call (can be any Object)
+ // -----------------------------------
+
+ // CheckSpreadAndPushToStack will push x3 to save it.
+ __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow),
+ RelocInfo::CODE_TARGET);
+}
+
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2814,6 +3061,19 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the constructor to call (can be any Object)
+ // -- x3 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
// ----------- S t a t e -------------
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index d3798c3857..eb34638fa0 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -7,6 +7,10 @@
#include "src/api-arguments.h"
#include "src/api-natives.h"
#include "src/builtins/builtins-utils.h"
+#include "src/counters.h"
+#include "src/log.h"
+#include "src/objects-inl.h"
+#include "src/prototype.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-arguments.cc b/deps/v8/src/builtins/builtins-arguments.cc
new file mode 100644
index 0000000000..337c862a8a
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-arguments.cc
@@ -0,0 +1,425 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-arguments.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/interface-descriptors.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+
+std::tuple<Node*, Node*, Node*>
+ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
+ ParameterMode mode) {
+ CSA_ASSERT(this, HasInstanceType(function, JS_FUNCTION_TYPE));
+
+ Variable frame_ptr(this, MachineType::PointerRepresentation());
+ frame_ptr.Bind(LoadParentFramePointer());
+ CSA_ASSERT(this,
+ WordEqual(function,
+ LoadBufferObject(frame_ptr.value(),
+ StandardFrameConstants::kFunctionOffset,
+ MachineType::Pointer())));
+ Variable argument_count(this, ParameterRepresentation(mode));
+ VariableList list({&frame_ptr, &argument_count}, zone());
+ Label done_argument_count(this, list);
+
+ // Determine the number of passed parameters, which is either the count stored
+ // in an arguments adaptor frame or fetched from the shared function info.
+ Node* frame_ptr_above = LoadBufferObject(
+ frame_ptr.value(), StandardFrameConstants::kCallerFPOffset,
+ MachineType::Pointer());
+ Node* shared =
+ LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+ Node* formal_parameter_count = LoadSharedFunctionInfoSpecialField(
+ shared, SharedFunctionInfo::kFormalParameterCountOffset, mode);
+ argument_count.Bind(formal_parameter_count);
+ Node* marker_or_function = LoadBufferObject(
+ frame_ptr_above, CommonFrameConstants::kContextOrFrameTypeOffset);
+ GotoIf(
+ MarkerIsNotFrameType(marker_or_function, StackFrame::ARGUMENTS_ADAPTOR),
+ &done_argument_count);
+ Node* adapted_parameter_count = LoadBufferObject(
+ frame_ptr_above, ArgumentsAdaptorFrameConstants::kLengthOffset);
+ frame_ptr.Bind(frame_ptr_above);
+ argument_count.Bind(TaggedToParameter(adapted_parameter_count, mode));
+ Goto(&done_argument_count);
+
+ Bind(&done_argument_count);
+ return std::tuple<Node*, Node*, Node*>(
+ frame_ptr.value(), argument_count.value(), formal_parameter_count);
+}
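
The helper returns the formal parameter count unless an arguments adaptor frame sits above, in which case the adapted (actual) count wins. Observably, that is the difference between arguments.length and the declared arity:

  function f(a: number, b: number): number {
    // With exactly two actual arguments no adaptor frame is needed and the
    // count is the formal parameter count; over-applying pushes an adaptor
    // frame whose length field supplies the count instead.
    return arguments.length;
  }
  console.log(f(1, 2));             // 2
  console.log((f as any)(1, 2, 3)); // 3 — read from the adaptor frame
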
+
+std::tuple<Node*, Node*, Node*>
+ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
+ Node* arguments_count,
+ Node* parameter_map_count,
+ ParameterMode mode,
+ int base_size) {
+ // Allocate the parameter object (either a Rest parameter object, a strict
+ // arguments object, or a sloppy arguments object) and the elements/mapped
+ // arguments together.
+ int elements_offset = base_size;
+ Node* element_count = arguments_count;
+ if (parameter_map_count != nullptr) {
+ base_size += FixedArray::kHeaderSize;
+ element_count = IntPtrOrSmiAdd(element_count, parameter_map_count, mode);
+ }
+ bool empty = IsIntPtrOrSmiConstantZero(arguments_count);
+ DCHECK_IMPLIES(empty, parameter_map_count == nullptr);
+ Node* size =
+ empty ? IntPtrConstant(base_size)
+ : ElementOffsetFromIndex(element_count, FAST_ELEMENTS, mode,
+ base_size + FixedArray::kHeaderSize);
+ Node* result = Allocate(size);
+ Comment("Initialize arguments object");
+ StoreMapNoWriteBarrier(result, map);
+ Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectField(result, JSArray::kPropertiesOffset, empty_fixed_array);
+ Node* smi_arguments_count = ParameterToTagged(arguments_count, mode);
+ StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset,
+ smi_arguments_count);
+ Node* arguments = nullptr;
+ if (!empty) {
+ arguments = InnerAllocate(result, elements_offset);
+ StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset,
+ smi_arguments_count);
+ Node* fixed_array_map = LoadRoot(Heap::kFixedArrayMapRootIndex);
+ StoreMapNoWriteBarrier(arguments, fixed_array_map);
+ }
+ Node* parameter_map = nullptr;
+ if (parameter_map_count != nullptr) {
+ Node* parameter_map_offset = ElementOffsetFromIndex(
+ arguments_count, FAST_ELEMENTS, mode, FixedArray::kHeaderSize);
+ parameter_map = InnerAllocate(arguments, parameter_map_offset);
+ StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
+ parameter_map);
+ Node* sloppy_elements_map =
+ LoadRoot(Heap::kSloppyArgumentsElementsMapRootIndex);
+ StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map);
+ parameter_map_count = ParameterToTagged(parameter_map_count, mode);
+ StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset,
+ parameter_map_count);
+ } else {
+ if (empty) {
+ StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
+ empty_fixed_array);
+ } else {
+ StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
+ arguments);
+ }
+ }
+ return std::tuple<Node*, Node*, Node*>(result, arguments, parameter_map);
+}
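
The size computation above folds the object, its FixedArray backing store, and (for sloppy mapped arguments) a second parameter-map store into a single allocation. A worked sketch of the arithmetic with hypothetical slot sizes (the real header sizes are V8-internal constants; these values are illustrative only):

  // Hypothetical sizes for illustration only; not the real V8 constants.
  const kPointerSize = 8;
  const kFixedArrayHeaderSize = 2 * kPointerSize; // map + length
  function allocationSize(baseSize: number, argCount: number,
                          mappedCount: number | null): number {
    let elementCount = argCount;
    if (mappedCount !== null) {
      baseSize += kFixedArrayHeaderSize;          // header of the parameter map
      elementCount += mappedCount;
    }
    return elementCount === 0
        ? baseSize
        : baseSize + kFixedArrayHeaderSize + elementCount * kPointerSize;
  }
  // e.g. a 4-slot object with 3 arguments and no mapped parameters:
  console.log(allocationSize(4 * 8, 3, null)); // 32 + 16 + 24 = 72
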
+
+Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
+ Node* map, Node* frame_ptr, Node* arg_count, Node* first_arg,
+ Node* rest_count, ParameterMode param_mode, int base_size) {
+ // Allocate the parameter object (either a Rest parameter object, a strict
+ // arguments object, or a sloppy arguments object) and the elements together,
+ // and fill in the contents with the arguments above |formal_parameter_count|.
+ Node* result;
+ Node* elements;
+ Node* unused;
+ std::tie(result, elements, unused) =
+ AllocateArgumentsObject(map, rest_count, nullptr, param_mode, base_size);
+ DCHECK(unused == nullptr);
+ CodeStubArguments arguments(this, arg_count, frame_ptr, param_mode);
+ Variable offset(this, MachineType::PointerRepresentation());
+ offset.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
+ VariableList list({&offset}, zone());
+ arguments.ForEach(list,
+ [this, elements, &offset](Node* arg) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ elements, offset.value(), arg);
+ Increment(offset, kPointerSize);
+ },
+ first_arg, nullptr, param_mode);
+ return result;
+}
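+
+// Note: the StoreNoWriteBarrier in the loop above is safe because the
+// elements array was freshly allocated (the callers guard the size with
+// GotoIfFixedArraySizeDoesntFitInNewSpace), so the stores cannot create
+// old-to-new references that a write barrier would have to record.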
+
+Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
+ Node* function) {
+ Node* frame_ptr;
+ Node* argument_count;
+ Node* formal_parameter_count;
+
+ ParameterMode mode = OptimalParameterMode();
+ Node* zero = IntPtrOrSmiConstant(0, mode);
+
+ std::tie(frame_ptr, argument_count, formal_parameter_count) =
+ GetArgumentsFrameAndCount(function, mode);
+
+ Variable result(this, MachineRepresentation::kTagged);
+ Label no_rest_parameters(this), runtime(this, Label::kDeferred),
+ done(this, &result);
+
+ Node* rest_count =
+ IntPtrOrSmiSub(argument_count, formal_parameter_count, mode);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
+ GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode),
+ &no_rest_parameters);
+
+ GotoIfFixedArraySizeDoesntFitInNewSpace(
+ rest_count, &runtime, JSArray::kSize + FixedArray::kHeaderSize, mode);
+
+ // Allocate the Rest JSArray and the elements together and fill in the
+ // contents with the arguments above |formal_parameter_count|.
+ result.Bind(ConstructParametersObjectFromArgs(
+ array_map, frame_ptr, argument_count, formal_parameter_count, rest_count,
+ mode, JSArray::kSize));
+ Goto(&done);
+
+ Bind(&no_rest_parameters);
+ {
+ Node* arguments;
+ Node* elements;
+ Node* unused;
+ std::tie(arguments, elements, unused) =
+ AllocateArgumentsObject(array_map, zero, nullptr, mode, JSArray::kSize);
+ result.Bind(arguments);
+ Goto(&done);
+ }
+
+ Bind(&runtime);
+ {
+ result.Bind(CallRuntime(Runtime::kNewRestParameter, context, function));
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
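+
+// Illustrative example: for `function f(a, ...rest) {}` invoked as
+// f(1, 2, 3), |formal_parameter_count| is 1 and |argument_count| is 3, so
+// |rest_count| is 2 and the fast path above materializes the array [2, 3].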
+
+TF_BUILTIN(FastNewRestParameter, ArgumentsBuiltinsAssembler) {
+ Node* function = Parameter(FastNewArgumentsDescriptor::kFunction);
+ Node* context = Parameter(FastNewArgumentsDescriptor::kContext);
+ Return(EmitFastNewRestParameter(context, function));
+}
+
+Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
+ Node* function) {
+ Variable result(this, MachineRepresentation::kTagged);
+ Label done(this, &result), empty(this), runtime(this, Label::kDeferred);
+
+ Node* frame_ptr;
+ Node* argument_count;
+ Node* formal_parameter_count;
+
+ ParameterMode mode = OptimalParameterMode();
+ Node* zero = IntPtrOrSmiConstant(0, mode);
+
+ std::tie(frame_ptr, argument_count, formal_parameter_count) =
+ GetArgumentsFrameAndCount(function, mode);
+
+ GotoIfFixedArraySizeDoesntFitInNewSpace(
+ argument_count, &runtime,
+ JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const map =
+ LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
+ GotoIf(WordEqual(argument_count, zero), &empty);
+
+ result.Bind(ConstructParametersObjectFromArgs(
+ map, frame_ptr, argument_count, zero, argument_count, mode,
+ JSStrictArgumentsObject::kSize));
+ Goto(&done);
+
+ Bind(&empty);
+ {
+ Node* arguments;
+ Node* elements;
+ Node* unused;
+ std::tie(arguments, elements, unused) = AllocateArgumentsObject(
+ map, zero, nullptr, mode, JSStrictArgumentsObject::kSize);
+ result.Bind(arguments);
+ Goto(&done);
+ }
+
+ Bind(&runtime);
+ {
+ result.Bind(CallRuntime(Runtime::kNewStrictArguments, context, function));
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
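+
+// Illustrative example: for a strict function `function f(a) { "use strict";
+// return arguments; }`, f(1, 2) yields an unmapped arguments object of
+// length 2; assigning to arguments[0] afterwards does not change |a|.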
+
+TF_BUILTIN(FastNewStrictArguments, ArgumentsBuiltinsAssembler) {
+ Node* function = Parameter(FastNewArgumentsDescriptor::kFunction);
+ Node* context = Parameter(FastNewArgumentsDescriptor::kContext);
+ Return(EmitFastNewStrictArguments(context, function));
+}
+
+Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
+ Node* function) {
+ Node* frame_ptr;
+ Node* argument_count;
+ Node* formal_parameter_count;
+ Variable result(this, MachineRepresentation::kTagged);
+
+ ParameterMode mode = OptimalParameterMode();
+ Node* zero = IntPtrOrSmiConstant(0, mode);
+
+ Label done(this, &result), empty(this), no_parameters(this),
+ runtime(this, Label::kDeferred);
+
+ std::tie(frame_ptr, argument_count, formal_parameter_count) =
+ GetArgumentsFrameAndCount(function, mode);
+
+ GotoIf(WordEqual(argument_count, zero), &empty);
+
+ GotoIf(WordEqual(formal_parameter_count, zero), &no_parameters);
+
+ {
+ Comment("Mapped parameter JSSloppyArgumentsObject");
+
+ Node* mapped_count =
+ IntPtrOrSmiMin(argument_count, formal_parameter_count, mode);
+
+ Node* parameter_map_size =
+ IntPtrOrSmiAdd(mapped_count, IntPtrOrSmiConstant(2, mode), mode);
+
+ // Verify that the overall allocation will fit in new space.
+ Node* elements_allocated =
+ IntPtrOrSmiAdd(argument_count, parameter_map_size, mode);
+ GotoIfFixedArraySizeDoesntFitInNewSpace(
+ elements_allocated, &runtime,
+ JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize * 2, mode);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const map = LoadContextElement(
+ native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+ Node* argument_object;
+ Node* elements;
+ Node* map_array;
+ std::tie(argument_object, elements, map_array) =
+ AllocateArgumentsObject(map, argument_count, parameter_map_size, mode,
+ JSSloppyArgumentsObject::kSize);
+ StoreObjectFieldNoWriteBarrier(
+ argument_object, JSSloppyArgumentsObject::kCalleeOffset, function);
+ StoreFixedArrayElement(map_array, 0, context, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(map_array, 1, elements, SKIP_WRITE_BARRIER);
+
+ Comment("Fill in non-mapped parameters");
+ Node* argument_offset =
+ ElementOffsetFromIndex(argument_count, FAST_ELEMENTS, mode,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ Node* mapped_offset =
+ ElementOffsetFromIndex(mapped_count, FAST_ELEMENTS, mode,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ CodeStubArguments arguments(this, argument_count, frame_ptr, mode);
+ Variable current_argument(this, MachineType::PointerRepresentation());
+ current_argument.Bind(arguments.AtIndexPtr(argument_count, mode));
+ VariableList var_list1({&current_argument}, zone());
+ mapped_offset = BuildFastLoop(
+ var_list1, argument_offset, mapped_offset,
+ [this, elements, &current_argument](Node* offset) {
+ Increment(current_argument, kPointerSize);
+ Node* arg = LoadBufferObject(current_argument.value(), 0);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
+ arg);
+ },
+ -kPointerSize, INTPTR_PARAMETERS);
+
+    // Copy the parameter slots and the holes in the arguments.
+    // We need to fill in mapped_count slots. They index the context,
+    // where parameters are stored in reverse order, at
+    //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+    // The mapped parameters thus need to get indices
+    //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+    //   MIN_CONTEXT_SLOTS+parameter_count-mapped_count
+    // We loop from right to left.
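+    // E.g. with formal_parameter_count == 3 and argument_count == 2,
+    // mapped_count is 2 and the two mapped slots receive the context
+    // indices MIN_CONTEXT_SLOTS+1 and MIN_CONTEXT_SLOTS+2.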
+ Comment("Fill in mapped parameters");
+ Variable context_index(this, OptimalParameterRepresentation());
+ context_index.Bind(IntPtrOrSmiSub(
+ IntPtrOrSmiAdd(IntPtrOrSmiConstant(Context::MIN_CONTEXT_SLOTS, mode),
+ formal_parameter_count, mode),
+ mapped_count, mode));
+ Node* the_hole = TheHoleConstant();
+ VariableList var_list2({&context_index}, zone());
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Node* adjusted_map_array = IntPtrAdd(
+ BitcastTaggedToWord(map_array),
+ IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ Node* zero_offset = ElementOffsetFromIndex(
+ zero, FAST_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
+ BuildFastLoop(var_list2, mapped_offset, zero_offset,
+ [this, the_hole, elements, adjusted_map_array, &context_index,
+ mode](Node* offset) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ elements, offset, the_hole);
+ StoreNoWriteBarrier(
+ MachineRepresentation::kTagged, adjusted_map_array,
+ offset, ParameterToTagged(context_index.value(), mode));
+ Increment(context_index, 1, mode);
+ },
+ -kPointerSize, INTPTR_PARAMETERS);
+
+ result.Bind(argument_object);
+ Goto(&done);
+ }
+
+ Bind(&no_parameters);
+ {
+ Comment("No parameters JSSloppyArgumentsObject");
+ GotoIfFixedArraySizeDoesntFitInNewSpace(
+ argument_count, &runtime,
+ JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const map =
+ LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ result.Bind(ConstructParametersObjectFromArgs(
+ map, frame_ptr, argument_count, zero, argument_count, mode,
+ JSSloppyArgumentsObject::kSize));
+ StoreObjectFieldNoWriteBarrier(
+ result.value(), JSSloppyArgumentsObject::kCalleeOffset, function);
+ Goto(&done);
+ }
+
+ Bind(&empty);
+ {
+ Comment("Empty JSSloppyArgumentsObject");
+ Node* const native_context = LoadNativeContext(context);
+ Node* const map =
+ LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ Node* arguments;
+ Node* elements;
+ Node* unused;
+ std::tie(arguments, elements, unused) = AllocateArgumentsObject(
+ map, zero, nullptr, mode, JSSloppyArgumentsObject::kSize);
+ result.Bind(arguments);
+ StoreObjectFieldNoWriteBarrier(
+ result.value(), JSSloppyArgumentsObject::kCalleeOffset, function);
+ Goto(&done);
+ }
+
+ Bind(&runtime);
+ {
+ result.Bind(CallRuntime(Runtime::kNewSloppyArguments, context, function));
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
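+
+// Illustrative example of the mapped (aliased) case above:
+//   function f(a) { arguments[0] = 42; return a; }
+// f(1) returns 42 in sloppy mode, because the parameter map aliases
+// arguments[0] to the context slot backing |a|.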
+
+TF_BUILTIN(FastNewSloppyArguments, ArgumentsBuiltinsAssembler) {
+ Node* function = Parameter(FastNewArgumentsDescriptor::kFunction);
+ Node* context = Parameter(FastNewArgumentsDescriptor::kContext);
+ Return(EmitFastNewSloppyArguments(context, function));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-arguments.h b/deps/v8/src/builtins/builtins-arguments.h
new file mode 100644
index 0000000000..e7c3823930
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-arguments.h
@@ -0,0 +1,55 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+
+class ArgumentsBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ArgumentsBuiltinsAssembler(CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ Node* EmitFastNewStrictArguments(Node* context, Node* function);
+ Node* EmitFastNewSloppyArguments(Node* context, Node* function);
+ Node* EmitFastNewRestParameter(Node* context, Node* function);
+
+ private:
+  // Calculates and returns the frame pointer, argument count and formal
+ // parameter count to be used to access a function's parameters, taking
+ // argument adapter frames into account. The tuple is of the form:
+ // <frame_ptr, # parameters actually passed, formal parameter count>
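+  // Illustrative example: calling `function f(a, b) {}` as f(1, 2, 3) goes
+  // through an arguments adaptor frame, so the returned tuple points at the
+  // adaptor frame with an argument count of 3 and a formal parameter count
+  // of 2.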
+ std::tuple<Node*, Node*, Node*> GetArgumentsFrameAndCount(Node* function,
+ ParameterMode mode);
+
+  // Allocates an arguments object (either rest, strict or sloppy) together
+  // with the FixedArray elements for the arguments and a parameter map (for
+  // sloppy arguments only). A tuple is returned with pointers to the
+  // arguments object, the elements and the parameter map in the form:
+ // <argument object, arguments FixedArray, parameter map or nullptr>
+ std::tuple<Node*, Node*, Node*> AllocateArgumentsObject(
+      Node* map, Node* arguments_count, Node* parameter_map_count,
+ ParameterMode param_mode, int base_size);
+
+ // For Rest parameters and Strict arguments, the copying of parameters from
+  // the stack into the arguments object is straightforward and shares much of
+ // the same underlying logic, which is encapsulated by this function. It
+ // allocates an arguments-like object of size |base_size| with the map |map|,
+ // and then copies |rest_count| arguments from the stack frame pointed to by
+ // |frame_ptr| starting from |first_arg|. |arg_count| == |first_arg| +
+ // |rest_count|.
+ Node* ConstructParametersObjectFromArgs(Node* map, Node* frame_ptr,
+ Node* arg_count, Node* first_arg,
+ Node* rest_count,
+ ParameterMode param_mode,
+ int base_size);
+};
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 047d88ecea..183820e5dd 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -8,7 +8,12 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/contexts.h"
+#include "src/counters.h"
#include "src/elements.h"
+#include "src/isolate.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
+#include "src/prototype.h"
namespace v8 {
namespace internal {
@@ -201,7 +206,7 @@ void Builtins::Generate_FastArrayPush(compiler::CodeAssemblerState* state) {
Node* context = assembler.Parameter(BuiltinDescriptor::kContext);
Node* new_target = assembler.Parameter(BuiltinDescriptor::kNewTarget);
- CodeStubArguments args(&assembler, argc);
+ CodeStubArguments args(&assembler, assembler.ChangeInt32ToIntPtr(argc));
Node* receiver = args.GetReceiver();
Node* kind = nullptr;
@@ -270,6 +275,15 @@ void Builtins::Generate_FastArrayPush(compiler::CodeAssemblerState* state) {
assembler.CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
assembler.SmiConstant(STRICT));
assembler.Increment(arg_index);
+ // The runtime SetProperty call could have converted the array to dictionary
+ // mode, which must be detected to abort the fast-path.
+ Node* map = assembler.LoadMap(receiver);
+ Node* bit_field2 = assembler.LoadMapBitField2(map);
+ Node* kind = assembler.DecodeWord32<Map::ElementsKindBits>(bit_field2);
+ assembler.GotoIf(assembler.Word32Equal(
+ kind, assembler.Int32Constant(DICTIONARY_ELEMENTS)),
+ &default_label);
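+  // (kSetProperty can run arbitrary JS, e.g. through setters reachable from
+  // the lookup, and that JS can sparsify the receiver, say via
+  // a[1 << 30] = 1, forcing a transition to dictionary elements.)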
+
assembler.GotoIfNotNumber(arg, &object_push);
assembler.Goto(&double_push);
}
@@ -310,6 +324,14 @@ void Builtins::Generate_FastArrayPush(compiler::CodeAssemblerState* state) {
assembler.CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
assembler.SmiConstant(STRICT));
assembler.Increment(arg_index);
+ // The runtime SetProperty call could have converted the array to dictionary
+ // mode, which must be detected to abort the fast-path.
+ Node* map = assembler.LoadMap(receiver);
+ Node* bit_field2 = assembler.LoadMapBitField2(map);
+ Node* kind = assembler.DecodeWord32<Map::ElementsKindBits>(bit_field2);
+ assembler.GotoIf(assembler.Word32Equal(
+ kind, assembler.Int32Constant(DICTIONARY_ELEMENTS)),
+ &default_label);
assembler.Goto(&object_push);
}
@@ -318,7 +340,7 @@ void Builtins::Generate_FastArrayPush(compiler::CodeAssemblerState* state) {
assembler.Bind(&default_label);
{
args.ForEach(
- [&assembler, receiver, context, &arg_index](Node* arg) {
+ [&assembler, receiver, context](Node* arg) {
Node* length = assembler.LoadJSArrayLength(receiver);
assembler.CallRuntime(Runtime::kSetProperty, context, receiver,
length, arg, assembler.SmiConstant(STRICT));
@@ -409,6 +431,242 @@ BUILTIN(ArrayUnshift) {
return Smi::FromInt(new_length);
}
+class ForEachCodeStubAssembler : public CodeStubAssembler {
+ public:
+ explicit ForEachCodeStubAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ void VisitOneElement(Node* context, Node* this_arg, Node* o, Node* k,
+ Node* callbackfn) {
+ Comment("begin VisitOneElement");
+
+ // a. Let Pk be ToString(k).
+ Node* p_k = ToString(context, k);
+
+ // b. Let kPresent be HasProperty(O, Pk).
+ // c. ReturnIfAbrupt(kPresent).
+ Node* k_present =
+ CallStub(CodeFactory::HasProperty(isolate()), context, p_k, o);
+
+ // d. If kPresent is true, then
+ Label not_present(this);
+ GotoIf(WordNotEqual(k_present, TrueConstant()), &not_present);
+
+ // i. Let kValue be Get(O, Pk).
+ // ii. ReturnIfAbrupt(kValue).
+ Node* k_value =
+ CallStub(CodeFactory::GetProperty(isolate()), context, o, k);
+
+ // iii. Let funcResult be Call(callbackfn, T, «kValue, k, O»).
+ // iv. ReturnIfAbrupt(funcResult).
+ CallJS(CodeFactory::Call(isolate()), context, callbackfn, this_arg, k_value,
+ k, o);
+
+ Goto(&not_present);
+ Bind(&not_present);
+ Comment("end VisitOneElement");
+ }
+
+ void VisitAllFastElements(Node* context, ElementsKind kind, Node* this_arg,
+ Node* o, Node* len, Node* callbackfn,
+ ParameterMode mode) {
+ Comment("begin VisitAllFastElements");
+ Variable original_map(this, MachineRepresentation::kTagged);
+ original_map.Bind(LoadMap(o));
+ VariableList list({&original_map}, zone());
+ BuildFastLoop(
+ list, IntPtrOrSmiConstant(0, mode), TaggedToParameter(len, mode),
+ [context, kind, this, o, &original_map, callbackfn, this_arg,
+ mode](Node* index) {
+ Label one_element_done(this), array_changed(this, Label::kDeferred),
+ hole_element(this);
+
+ // Check if o's map has changed during the callback. If so, we have to
+ // fall back to the slower spec implementation for the rest of the
+ // iteration.
+ Node* o_map = LoadMap(o);
+ GotoIf(WordNotEqual(o_map, original_map.value()), &array_changed);
+
+ // Check if o's length has changed during the callback and if the
+ // index is now out of range of the new length.
+ Node* tagged_index = ParameterToTagged(index, mode);
+ GotoIf(SmiGreaterThanOrEqual(tagged_index, LoadJSArrayLength(o)),
+ &array_changed);
+
+            // Re-load the elements array. It may have been resized.
+ Node* elements = LoadElements(o);
+
+ // Fast case: load the element directly from the elements FixedArray
+ // and call the callback if the element is not the hole.
+ DCHECK(kind == FAST_ELEMENTS || kind == FAST_DOUBLE_ELEMENTS);
+ int base_size = kind == FAST_ELEMENTS
+ ? FixedArray::kHeaderSize
+ : (FixedArray::kHeaderSize - kHeapObjectTag);
+ Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size);
+ Node* value = nullptr;
+ if (kind == FAST_ELEMENTS) {
+ value = LoadObjectField(elements, offset);
+ GotoIf(WordEqual(value, TheHoleConstant()), &hole_element);
+ } else {
+ Node* double_value =
+ LoadDoubleWithHoleCheck(elements, offset, &hole_element);
+ value = AllocateHeapNumberWithValue(double_value);
+ }
+ CallJS(CodeFactory::Call(isolate()), context, callbackfn, this_arg,
+ value, tagged_index, o);
+ Goto(&one_element_done);
+
+ Bind(&hole_element);
+ BranchIfPrototypesHaveNoElements(o_map, &one_element_done,
+ &array_changed);
+
+            // O has changed during the forEach. Use the implementation
+            // precisely specified in the spec for the rest of the iteration,
+            // also making the failed original_map sticky in case of a
+            // subsequent change that goes back to the original map.
+ Bind(&array_changed);
+ VisitOneElement(context, this_arg, o, ParameterToTagged(index, mode),
+ callbackfn);
+ original_map.Bind(UndefinedConstant());
+ Goto(&one_element_done);
+
+ Bind(&one_element_done);
+ },
+ 1, mode, IndexAdvanceMode::kPost);
+ Comment("end VisitAllFastElements");
+ }
+};
+
+TF_BUILTIN(ArrayForEach, ForEachCodeStubAssembler) {
+ Label non_array(this), examine_elements(this), fast_elements(this),
+ slow(this), maybe_double_elements(this), fast_double_elements(this);
+
+ Node* receiver = Parameter(ForEachDescriptor::kReceiver);
+ Node* callbackfn = Parameter(ForEachDescriptor::kCallback);
+ Node* this_arg = Parameter(ForEachDescriptor::kThisArg);
+ Node* context = Parameter(ForEachDescriptor::kContext);
+
+ // TODO(danno): Seriously? Do we really need to throw the exact error message
+ // on null and undefined so that the webkit tests pass?
+ Label throw_null_undefined_exception(this, Label::kDeferred);
+ GotoIf(WordEqual(receiver, NullConstant()), &throw_null_undefined_exception);
+ GotoIf(WordEqual(receiver, UndefinedConstant()),
+ &throw_null_undefined_exception);
+
+ // By the book: taken directly from the ECMAScript 2015 specification
+
+ // 1. Let O be ToObject(this value).
+ // 2. ReturnIfAbrupt(O)
+ Node* o = CallStub(CodeFactory::ToObject(isolate()), context, receiver);
+
+ // 3. Let len be ToLength(Get(O, "length")).
+ // 4. ReturnIfAbrupt(len).
+ Variable merged_length(this, MachineRepresentation::kTagged);
+ Label has_length(this, &merged_length), not_js_array(this);
+ GotoIf(DoesntHaveInstanceType(o, JS_ARRAY_TYPE), &not_js_array);
+ merged_length.Bind(LoadJSArrayLength(o));
+ Goto(&has_length);
+ Bind(&not_js_array);
+ Node* len_property =
+ CallStub(CodeFactory::GetProperty(isolate()), context, o,
+ HeapConstant(isolate()->factory()->length_string()));
+ merged_length.Bind(
+ CallStub(CodeFactory::ToLength(isolate()), context, len_property));
+ Goto(&has_length);
+ Bind(&has_length);
+ Node* len = merged_length.value();
+
+ // 5. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ Label type_exception(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(callbackfn), &type_exception);
+ GotoIfNot(IsCallableMap(LoadMap(callbackfn)), &type_exception);
+
+ // 6. If thisArg was supplied, let T be thisArg; else let T be undefined.
+ // [Already done by the arguments adapter]
+
+ // Non-smi lengths must use the slow path.
+ GotoIf(TaggedIsNotSmi(len), &slow);
+
+ BranchIfFastJSArray(o, context,
+ CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
+ &examine_elements, &slow);
+
+ Bind(&examine_elements);
+
+ ParameterMode mode = OptimalParameterMode();
+
+ // Select by ElementsKind
+ Node* o_map = LoadMap(o);
+ Node* bit_field2 = LoadMapBitField2(o_map);
+ Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
+ Branch(Int32GreaterThan(kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
+ &maybe_double_elements, &fast_elements);
+
+ Bind(&fast_elements);
+ {
+ VisitAllFastElements(context, FAST_ELEMENTS, this_arg, o, len, callbackfn,
+ mode);
+
+ // No exception, return success
+ Return(UndefinedConstant());
+ }
+
+ Bind(&maybe_double_elements);
+ Branch(Int32GreaterThan(kind, Int32Constant(FAST_HOLEY_DOUBLE_ELEMENTS)),
+ &slow, &fast_double_elements);
+
+ Bind(&fast_double_elements);
+ {
+ VisitAllFastElements(context, FAST_DOUBLE_ELEMENTS, this_arg, o, len,
+ callbackfn, mode);
+
+ // No exception, return success
+ Return(UndefinedConstant());
+ }
+
+ Bind(&slow);
+ {
+ // By the book: taken from the ECMAScript 2015 specification (cont.)
+
+ // 7. Let k be 0.
+ Variable k(this, MachineRepresentation::kTagged);
+ k.Bind(SmiConstant(0));
+
+ // 8. Repeat, while k < len
+ Label loop(this, &k);
+ Label after_loop(this);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ GotoUnlessNumberLessThan(k.value(), len, &after_loop);
+
+ VisitOneElement(context, this_arg, o, k.value(), callbackfn);
+
+ // e. Increase k by 1.
+ k.Bind(NumberInc(k.value()));
+ Goto(&loop);
+ }
+ Bind(&after_loop);
+ Return(UndefinedConstant());
+ }
+
+ Bind(&throw_null_undefined_exception);
+ {
+ CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kCalledOnNullOrUndefined),
+ HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(
+ "Array.prototype.forEach")));
+ Unreachable();
+ }
+
+ Bind(&type_exception);
+ {
+ CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kCalledNonCallable), callbackfn);
+ Unreachable();
+ }
+}
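+
+// Illustrative example of the fallback above: a callback that shrinks the
+// array leaves the fast path for the rest of the iteration:
+//   [1, 2, 3].forEach((x, i, a) => { if (i === 0) a.length = 2; });
+// visits indices 0 and 1 on the fast path; index 2 fails the length check,
+// and VisitOneElement then observes the property as absent and skips it.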
+
BUILTIN(ArraySlice) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
@@ -1421,442 +1679,310 @@ void Builtins::Generate_ArrayIsArray(compiler::CodeAssemblerState* state) {
assembler.CallRuntime(Runtime::kArrayIsArray, context, object));
}
-void Builtins::Generate_ArrayIncludes(compiler::CodeAssemblerState* state) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
- CodeStubAssembler assembler(state);
-
- Node* array = assembler.Parameter(0);
- Node* search_element = assembler.Parameter(1);
- Node* start_from = assembler.Parameter(2);
- Node* context = assembler.Parameter(3 + 2);
+TF_BUILTIN(ArrayIncludes, CodeStubAssembler) {
+ Node* const array = Parameter(0);
+ Node* const search_element = Parameter(1);
+ Node* const start_from = Parameter(2);
+ Node* const context = Parameter(3 + 2);
- Node* intptr_zero = assembler.IntPtrConstant(0);
- Node* intptr_one = assembler.IntPtrConstant(1);
+ Variable len_var(this, MachineType::PointerRepresentation()),
+ index_var(this, MachineType::PointerRepresentation());
- Node* the_hole = assembler.TheHoleConstant();
- Node* undefined = assembler.UndefinedConstant();
+ Label init_k(this), return_true(this), return_false(this), call_runtime(this);
+ Label init_len(this), select_loop(this);
- Variable len_var(&assembler, MachineType::PointerRepresentation()),
- index_var(&assembler, MachineType::PointerRepresentation()),
- start_from_var(&assembler, MachineType::PointerRepresentation());
-
- Label init_k(&assembler), return_true(&assembler), return_false(&assembler),
- call_runtime(&assembler);
-
- Label init_len(&assembler);
-
- index_var.Bind(intptr_zero);
- len_var.Bind(intptr_zero);
+ index_var.Bind(IntPtrConstant(0));
+ len_var.Bind(IntPtrConstant(0));
// Take slow path if not a JSArray, if retrieving elements requires
// traversing prototype, or if access checks are required.
- assembler.BranchIfFastJSArray(
- array, context, CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
- &init_len, &call_runtime);
+ BranchIfFastJSArray(array, context,
+ CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
+ &init_len, &call_runtime);
- assembler.Bind(&init_len);
+ Bind(&init_len);
{
// Handle case where JSArray length is not an Smi in the runtime
- Node* len = assembler.LoadObjectField(array, JSArray::kLengthOffset);
- assembler.GotoUnless(assembler.TaggedIsSmi(len), &call_runtime);
-
- len_var.Bind(assembler.SmiToWord(len));
- assembler.Branch(assembler.WordEqual(len_var.value(), intptr_zero),
- &return_false, &init_k);
- }
-
- assembler.Bind(&init_k);
- {
- Label done(&assembler), init_k_smi(&assembler), init_k_heap_num(&assembler),
- init_k_zero(&assembler), init_k_n(&assembler);
- Node* tagged_n = assembler.ToInteger(context, start_from);
-
- assembler.Branch(assembler.TaggedIsSmi(tagged_n), &init_k_smi,
- &init_k_heap_num);
+ Node* len = LoadObjectField(array, JSArray::kLengthOffset);
+ GotoIfNot(TaggedIsSmi(len), &call_runtime);
- assembler.Bind(&init_k_smi);
- {
- start_from_var.Bind(assembler.SmiUntag(tagged_n));
- assembler.Goto(&init_k_n);
- }
-
- assembler.Bind(&init_k_heap_num);
- {
- Label do_return_false(&assembler);
- // This round is lossless for all valid lengths.
- Node* fp_len = assembler.RoundIntPtrToFloat64(len_var.value());
- Node* fp_n = assembler.LoadHeapNumberValue(tagged_n);
- assembler.GotoIf(assembler.Float64GreaterThanOrEqual(fp_n, fp_len),
- &do_return_false);
- start_from_var.Bind(assembler.ChangeInt32ToIntPtr(
- assembler.TruncateFloat64ToWord32(fp_n)));
- assembler.Goto(&init_k_n);
-
- assembler.Bind(&do_return_false);
- {
- index_var.Bind(intptr_zero);
- assembler.Goto(&return_false);
- }
- }
-
- assembler.Bind(&init_k_n);
- {
- Label if_positive(&assembler), if_negative(&assembler), done(&assembler);
- assembler.Branch(
- assembler.IntPtrLessThan(start_from_var.value(), intptr_zero),
- &if_negative, &if_positive);
+ len_var.Bind(SmiToWord(len));
- assembler.Bind(&if_positive);
- {
- index_var.Bind(start_from_var.value());
- assembler.Goto(&done);
- }
-
- assembler.Bind(&if_negative);
- {
- index_var.Bind(
- assembler.IntPtrAdd(len_var.value(), start_from_var.value()));
- assembler.Branch(
- assembler.IntPtrLessThan(index_var.value(), intptr_zero),
- &init_k_zero, &done);
- }
+ GotoIf(IsUndefined(start_from), &select_loop);
- assembler.Bind(&init_k_zero);
- {
- index_var.Bind(intptr_zero);
- assembler.Goto(&done);
- }
-
- assembler.Bind(&done);
- }
+  // Bail out to the slow path if startIndex is not a Smi.
+ Branch(TaggedIsSmi(start_from), &init_k, &call_runtime);
}
+ Bind(&init_k);
+ CSA_ASSERT(this, TaggedIsSmi(start_from));
+ Node* const untagged_start_from = SmiToWord(start_from);
+ index_var.Bind(Select(
+ IntPtrGreaterThanOrEqual(untagged_start_from, IntPtrConstant(0)),
+ [=]() { return untagged_start_from; },
+ [=]() {
+ Node* const index = IntPtrAdd(len_var.value(), untagged_start_from);
+ return SelectConstant(IntPtrLessThan(index, IntPtrConstant(0)),
+ IntPtrConstant(0), index,
+ MachineType::PointerRepresentation());
+ },
+ MachineType::PointerRepresentation()));
+
+ Goto(&select_loop);
+ Bind(&select_loop);
static int32_t kElementsKind[] = {
FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
};
- Label if_smiorobjects(&assembler), if_packed_doubles(&assembler),
- if_holey_doubles(&assembler);
+ Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
&if_smiorobjects, &if_smiorobjects,
&if_packed_doubles, &if_holey_doubles};
- Node* map = assembler.LoadMap(array);
- Node* elements_kind = assembler.LoadMapElementsKind(map);
- Node* elements = assembler.LoadElements(array);
- assembler.Switch(elements_kind, &return_false, kElementsKind,
- element_kind_handlers, arraysize(kElementsKind));
+ Node* map = LoadMap(array);
+ Node* elements_kind = LoadMapElementsKind(map);
+ Node* elements = LoadElements(array);
+ Switch(elements_kind, &return_false, kElementsKind, element_kind_handlers,
+ arraysize(kElementsKind));
- assembler.Bind(&if_smiorobjects);
+ Bind(&if_smiorobjects);
{
- Variable search_num(&assembler, MachineRepresentation::kFloat64);
- Label ident_loop(&assembler, &index_var),
- heap_num_loop(&assembler, &search_num),
- string_loop(&assembler, &index_var), simd_loop(&assembler),
- undef_loop(&assembler, &index_var), not_smi(&assembler),
- not_heap_num(&assembler);
-
- assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &not_smi);
- search_num.Bind(assembler.SmiToFloat64(search_element));
- assembler.Goto(&heap_num_loop);
-
- assembler.Bind(&not_smi);
- assembler.GotoIf(assembler.WordEqual(search_element, undefined),
- &undef_loop);
- Node* map = assembler.LoadMap(search_element);
- assembler.GotoUnless(assembler.IsHeapNumberMap(map), &not_heap_num);
- search_num.Bind(assembler.LoadHeapNumberValue(search_element));
- assembler.Goto(&heap_num_loop);
-
- assembler.Bind(&not_heap_num);
- Node* search_type = assembler.LoadMapInstanceType(map);
- assembler.GotoIf(assembler.IsStringInstanceType(search_type), &string_loop);
- assembler.GotoIf(
- assembler.Word32Equal(search_type,
- assembler.Int32Constant(SIMD128_VALUE_TYPE)),
- &simd_loop);
- assembler.Goto(&ident_loop);
-
- assembler.Bind(&ident_loop);
+ Variable search_num(this, MachineRepresentation::kFloat64);
+ Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
+ string_loop(this, &index_var), undef_loop(this, &index_var),
+ not_smi(this), not_heap_num(this);
+
+ GotoIfNot(TaggedIsSmi(search_element), &not_smi);
+ search_num.Bind(SmiToFloat64(search_element));
+ Goto(&heap_num_loop);
+
+ Bind(&not_smi);
+ GotoIf(WordEqual(search_element, UndefinedConstant()), &undef_loop);
+ Node* map = LoadMap(search_element);
+ GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
+ search_num.Bind(LoadHeapNumberValue(search_element));
+ Goto(&heap_num_loop);
+
+ Bind(&not_heap_num);
+ Node* search_type = LoadMapInstanceType(map);
+ GotoIf(IsStringInstanceType(search_type), &string_loop);
+ Goto(&ident_loop);
+
+ Bind(&ident_loop);
{
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
- Node* element_k =
- assembler.LoadFixedArrayElement(elements, index_var.value());
- assembler.GotoIf(assembler.WordEqual(element_k, search_element),
- &return_true);
+ GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(WordEqual(element_k, search_element), &return_true);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&ident_loop);
+ index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Goto(&ident_loop);
}
- assembler.Bind(&undef_loop);
+ Bind(&undef_loop);
{
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
- Node* element_k =
- assembler.LoadFixedArrayElement(elements, index_var.value());
- assembler.GotoIf(assembler.WordEqual(element_k, undefined), &return_true);
- assembler.GotoIf(assembler.WordEqual(element_k, the_hole), &return_true);
+ GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(WordEqual(element_k, UndefinedConstant()), &return_true);
+ GotoIf(WordEqual(element_k, TheHoleConstant()), &return_true);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&undef_loop);
+ index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Goto(&undef_loop);
}
- assembler.Bind(&heap_num_loop);
+ Bind(&heap_num_loop);
{
- Label nan_loop(&assembler, &index_var),
- not_nan_loop(&assembler, &index_var);
- assembler.BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
- &not_nan_loop);
+ Label nan_loop(this, &index_var), not_nan_loop(this, &index_var);
+ BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);
- assembler.Bind(&not_nan_loop);
+ Bind(&not_nan_loop);
{
- Label continue_loop(&assembler), not_smi(&assembler);
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
- Node* element_k =
- assembler.LoadFixedArrayElement(elements, index_var.value());
- assembler.GotoUnless(assembler.TaggedIsSmi(element_k), &not_smi);
- assembler.Branch(
- assembler.Float64Equal(search_num.value(),
- assembler.SmiToFloat64(element_k)),
- &return_true, &continue_loop);
-
- assembler.Bind(&not_smi);
- assembler.GotoUnless(
- assembler.IsHeapNumberMap(assembler.LoadMap(element_k)),
- &continue_loop);
- assembler.Branch(
- assembler.Float64Equal(search_num.value(),
- assembler.LoadHeapNumberValue(element_k)),
- &return_true, &continue_loop);
-
- assembler.Bind(&continue_loop);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&not_nan_loop);
+ Label continue_loop(this), not_smi(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIfNot(TaggedIsSmi(element_k), &not_smi);
+ Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
+ &return_true, &continue_loop);
+
+ Bind(&not_smi);
+ GotoIfNot(IsHeapNumber(element_k), &continue_loop);
+ Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
+ &return_true, &continue_loop);
+
+ Bind(&continue_loop);
+ index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Goto(&not_nan_loop);
}
- assembler.Bind(&nan_loop);
+ Bind(&nan_loop);
{
- Label continue_loop(&assembler);
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
- Node* element_k =
- assembler.LoadFixedArrayElement(elements, index_var.value());
- assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
- assembler.GotoUnless(
- assembler.IsHeapNumberMap(assembler.LoadMap(element_k)),
- &continue_loop);
- assembler.BranchIfFloat64IsNaN(assembler.LoadHeapNumberValue(element_k),
- &return_true, &continue_loop);
-
- assembler.Bind(&continue_loop);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&nan_loop);
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(TaggedIsSmi(element_k), &continue_loop);
+ GotoIfNot(IsHeapNumber(element_k), &continue_loop);
+ BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_true,
+ &continue_loop);
+
+ Bind(&continue_loop);
+ index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Goto(&nan_loop);
}
}
- assembler.Bind(&string_loop);
+ Bind(&string_loop);
{
- Label continue_loop(&assembler);
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
- Node* element_k =
- assembler.LoadFixedArrayElement(elements, index_var.value());
- assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
- assembler.GotoUnless(
- assembler.IsStringInstanceType(assembler.LoadInstanceType(element_k)),
- &continue_loop);
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(TaggedIsSmi(element_k), &continue_loop);
+ GotoIfNot(IsStringInstanceType(LoadInstanceType(element_k)),
+ &continue_loop);
// TODO(bmeurer): Consider inlining the StringEqual logic here.
- Callable callable = CodeFactory::StringEqual(assembler.isolate());
- Node* result =
- assembler.CallStub(callable, context, search_element, element_k);
- assembler.Branch(
- assembler.WordEqual(assembler.BooleanConstant(true), result),
- &return_true, &continue_loop);
+ Node* result = CallStub(CodeFactory::StringEqual(isolate()), context,
+ search_element, element_k);
+ Branch(WordEqual(BooleanConstant(true), result), &return_true,
+ &continue_loop);
- assembler.Bind(&continue_loop);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&string_loop);
- }
-
- assembler.Bind(&simd_loop);
- {
- Label continue_loop(&assembler, &index_var),
- loop_body(&assembler, &index_var);
- Node* map = assembler.LoadMap(search_element);
-
- assembler.Goto(&loop_body);
- assembler.Bind(&loop_body);
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
-
- Node* element_k =
- assembler.LoadFixedArrayElement(elements, index_var.value());
- assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
-
- Node* map_k = assembler.LoadMap(element_k);
- assembler.BranchIfSimd128Equal(search_element, map, element_k, map_k,
- &return_true, &continue_loop);
-
- assembler.Bind(&continue_loop);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&loop_body);
+ Bind(&continue_loop);
+ index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Goto(&string_loop);
}
}
- assembler.Bind(&if_packed_doubles);
+ Bind(&if_packed_doubles);
{
- Label nan_loop(&assembler, &index_var),
- not_nan_loop(&assembler, &index_var), hole_loop(&assembler, &index_var),
- search_notnan(&assembler);
- Variable search_num(&assembler, MachineRepresentation::kFloat64);
+ Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
+ hole_loop(this, &index_var), search_notnan(this);
+ Variable search_num(this, MachineRepresentation::kFloat64);
- assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
- search_num.Bind(assembler.SmiToFloat64(search_element));
- assembler.Goto(&not_nan_loop);
+ GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(SmiToFloat64(search_element));
+ Goto(&not_nan_loop);
- assembler.Bind(&search_notnan);
- assembler.GotoUnless(
- assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
- &return_false);
+ Bind(&search_notnan);
+ GotoIfNot(IsHeapNumber(search_element), &return_false);
- search_num.Bind(assembler.LoadHeapNumberValue(search_element));
+ search_num.Bind(LoadHeapNumberValue(search_element));
- assembler.BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
- &not_nan_loop);
+ BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);
// Search for HeapNumber
- assembler.Bind(&not_nan_loop);
+ Bind(&not_nan_loop);
{
- Label continue_loop(&assembler);
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
- Node* element_k = assembler.LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64());
- assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
- &return_true, &continue_loop);
- assembler.Bind(&continue_loop);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&not_nan_loop);
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::Float64());
+ Branch(Float64Equal(element_k, search_num.value()), &return_true,
+ &continue_loop);
+ Bind(&continue_loop);
+ index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Goto(&not_nan_loop);
}
// Search for NaN
- assembler.Bind(&nan_loop);
+ Bind(&nan_loop);
{
- Label continue_loop(&assembler);
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
- Node* element_k = assembler.LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64());
- assembler.BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
- assembler.Bind(&continue_loop);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&nan_loop);
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::Float64());
+ BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+ Bind(&continue_loop);
+ index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Goto(&nan_loop);
}
}
- assembler.Bind(&if_holey_doubles);
+ Bind(&if_holey_doubles);
{
- Label nan_loop(&assembler, &index_var),
- not_nan_loop(&assembler, &index_var), hole_loop(&assembler, &index_var),
- search_notnan(&assembler);
- Variable search_num(&assembler, MachineRepresentation::kFloat64);
+ Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
+ hole_loop(this, &index_var), search_notnan(this);
+ Variable search_num(this, MachineRepresentation::kFloat64);
- assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
- search_num.Bind(assembler.SmiToFloat64(search_element));
- assembler.Goto(&not_nan_loop);
+ GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(SmiToFloat64(search_element));
+ Goto(&not_nan_loop);
- assembler.Bind(&search_notnan);
- assembler.GotoIf(assembler.WordEqual(search_element, undefined),
- &hole_loop);
- assembler.GotoUnless(
- assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
- &return_false);
+ Bind(&search_notnan);
+ GotoIf(WordEqual(search_element, UndefinedConstant()), &hole_loop);
+ GotoIfNot(IsHeapNumber(search_element), &return_false);
- search_num.Bind(assembler.LoadHeapNumberValue(search_element));
+ search_num.Bind(LoadHeapNumberValue(search_element));
- assembler.BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
- &not_nan_loop);
+ BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);
// Search for HeapNumber
- assembler.Bind(&not_nan_loop);
+ Bind(&not_nan_loop);
{
- Label continue_loop(&assembler);
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+ &return_false);
// Load double value or continue if it contains a double hole.
- Node* element_k = assembler.LoadFixedDoubleArrayElement(
+ Node* element_k = LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
- assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
- &return_true, &continue_loop);
- assembler.Bind(&continue_loop);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&not_nan_loop);
+ Branch(Float64Equal(element_k, search_num.value()), &return_true,
+ &continue_loop);
+ Bind(&continue_loop);
+ index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Goto(&not_nan_loop);
}
// Search for NaN
- assembler.Bind(&nan_loop);
+ Bind(&nan_loop);
{
- Label continue_loop(&assembler);
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+ &return_false);
// Load double value or continue if it contains a double hole.
- Node* element_k = assembler.LoadFixedDoubleArrayElement(
+ Node* element_k = LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
- assembler.BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
- assembler.Bind(&continue_loop);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&nan_loop);
+ BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+ Bind(&continue_loop);
+ index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Goto(&nan_loop);
}
// Search for the Hole
- assembler.Bind(&hole_loop);
+ Bind(&hole_loop);
{
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_false);
+ GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+ &return_false);
// Check if the element is a double hole, but don't load it.
- assembler.LoadFixedDoubleArrayElement(
+ LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::None(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &return_true);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&hole_loop);
+ index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Goto(&hole_loop);
}
}
- assembler.Bind(&return_true);
- assembler.Return(assembler.BooleanConstant(true));
+ Bind(&return_true);
+ Return(TrueConstant());
- assembler.Bind(&return_false);
- assembler.Return(assembler.BooleanConstant(false));
+ Bind(&return_false);
+ Return(FalseConstant());
- assembler.Bind(&call_runtime);
- assembler.Return(assembler.CallRuntime(Runtime::kArrayIncludes_Slow, context,
- array, search_element, start_from));
+ Bind(&call_runtime);
+ Return(CallRuntime(Runtime::kArrayIncludes_Slow, context, array,
+ search_element, start_from));
}
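+
+// Note on the semantics preserved above: Array.prototype.includes compares
+// with SameValueZero, so the nan_loop makes [NaN].includes(NaN) return true,
+// whereas indexOf, which uses strict equality, returns -1 for NaN.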
void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
@@ -1897,7 +2023,7 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
{
// Handle case where JSArray length is not an Smi in the runtime
Node* len = assembler.LoadObjectField(array, JSArray::kLengthOffset);
- assembler.GotoUnless(assembler.TaggedIsSmi(len), &call_runtime);
+ assembler.GotoIfNot(assembler.TaggedIsSmi(len), &call_runtime);
len_var.Bind(assembler.SmiToWord(len));
assembler.Branch(assembler.WordEqual(len_var.value(), intptr_zero),
@@ -1906,36 +2032,29 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
assembler.Bind(&init_k);
{
- Label done(&assembler), init_k_smi(&assembler), init_k_heap_num(&assembler),
+  // For now only deal with undefined and Smis here; we must be really careful
+  // with side-effects from the ToInteger conversion, as those side-effects
+  // might invalidate our assumptions about the receiver being a fast JSArray
+  // and about its length.
+ Label done(&assembler), init_k_smi(&assembler), init_k_other(&assembler),
init_k_zero(&assembler), init_k_n(&assembler);
- Node* tagged_n = assembler.ToInteger(context, start_from);
- assembler.Branch(assembler.TaggedIsSmi(tagged_n), &init_k_smi,
- &init_k_heap_num);
+ assembler.Branch(assembler.TaggedIsSmi(start_from), &init_k_smi,
+ &init_k_other);
assembler.Bind(&init_k_smi);
{
- start_from_var.Bind(assembler.SmiUntag(tagged_n));
+ start_from_var.Bind(assembler.SmiUntag(start_from));
assembler.Goto(&init_k_n);
}
- assembler.Bind(&init_k_heap_num);
+ assembler.Bind(&init_k_other);
{
- Label do_return_not_found(&assembler);
- // This round is lossless for all valid lengths.
- Node* fp_len = assembler.RoundIntPtrToFloat64(len_var.value());
- Node* fp_n = assembler.LoadHeapNumberValue(tagged_n);
- assembler.GotoIf(assembler.Float64GreaterThanOrEqual(fp_n, fp_len),
- &do_return_not_found);
- start_from_var.Bind(assembler.ChangeInt32ToIntPtr(
- assembler.TruncateFloat64ToWord32(fp_n)));
+      // The fromIndex must be undefined here; otherwise bail out and let the
+ // runtime deal with the full ToInteger conversion.
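+      // (For example, a fromIndex of {valueOf() { array.length = 0;
+      // return 0; }} would run arbitrary JS inside ToInteger.)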
+ assembler.GotoIfNot(assembler.IsUndefined(start_from), &call_runtime);
+ start_from_var.Bind(intptr_zero);
assembler.Goto(&init_k_n);
-
- assembler.Bind(&do_return_not_found);
- {
- index_var.Bind(intptr_zero);
- assembler.Goto(&return_not_found);
- }
}
assembler.Bind(&init_k_n);
@@ -1992,11 +2111,10 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
Variable search_num(&assembler, MachineRepresentation::kFloat64);
Label ident_loop(&assembler, &index_var),
heap_num_loop(&assembler, &search_num),
- string_loop(&assembler, &index_var), simd_loop(&assembler),
- undef_loop(&assembler, &index_var), not_smi(&assembler),
- not_heap_num(&assembler);
+ string_loop(&assembler, &index_var), undef_loop(&assembler, &index_var),
+ not_smi(&assembler), not_heap_num(&assembler);
- assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &not_smi);
+ assembler.GotoIfNot(assembler.TaggedIsSmi(search_element), &not_smi);
search_num.Bind(assembler.SmiToFloat64(search_element));
assembler.Goto(&heap_num_loop);
@@ -2004,22 +2122,18 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
assembler.GotoIf(assembler.WordEqual(search_element, undefined),
&undef_loop);
Node* map = assembler.LoadMap(search_element);
- assembler.GotoUnless(assembler.IsHeapNumberMap(map), &not_heap_num);
+ assembler.GotoIfNot(assembler.IsHeapNumberMap(map), &not_heap_num);
search_num.Bind(assembler.LoadHeapNumberValue(search_element));
assembler.Goto(&heap_num_loop);
assembler.Bind(&not_heap_num);
Node* search_type = assembler.LoadMapInstanceType(map);
assembler.GotoIf(assembler.IsStringInstanceType(search_type), &string_loop);
- assembler.GotoIf(
- assembler.Word32Equal(search_type,
- assembler.Int32Constant(SIMD128_VALUE_TYPE)),
- &simd_loop);
assembler.Goto(&ident_loop);
assembler.Bind(&ident_loop);
{
- assembler.GotoUnless(
+ assembler.GotoIfNot(
assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
Node* element_k =
@@ -2033,7 +2147,7 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
assembler.Bind(&undef_loop);
{
- assembler.GotoUnless(
+ assembler.GotoIfNot(
assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
Node* element_k =
@@ -2054,19 +2168,19 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
assembler.Bind(&not_nan_loop);
{
Label continue_loop(&assembler), not_smi(&assembler);
- assembler.GotoUnless(
+ assembler.GotoIfNot(
assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
Node* element_k =
assembler.LoadFixedArrayElement(elements, index_var.value());
- assembler.GotoUnless(assembler.TaggedIsSmi(element_k), &not_smi);
+ assembler.GotoIfNot(assembler.TaggedIsSmi(element_k), &not_smi);
assembler.Branch(
assembler.Float64Equal(search_num.value(),
assembler.SmiToFloat64(element_k)),
&return_found, &continue_loop);
assembler.Bind(&not_smi);
- assembler.GotoUnless(
+ assembler.GotoIfNot(
assembler.IsHeapNumberMap(assembler.LoadMap(element_k)),
&continue_loop);
assembler.Branch(
@@ -2083,13 +2197,13 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
assembler.Bind(&string_loop);
{
Label continue_loop(&assembler);
- assembler.GotoUnless(
+ assembler.GotoIfNot(
assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
Node* element_k =
assembler.LoadFixedArrayElement(elements, index_var.value());
assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
- assembler.GotoUnless(
+ assembler.GotoIfNot(
assembler.IsStringInstanceType(assembler.LoadInstanceType(element_k)),
&continue_loop);
@@ -2105,31 +2219,6 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
assembler.Goto(&string_loop);
}
-
- assembler.Bind(&simd_loop);
- {
- Label continue_loop(&assembler, &index_var),
- loop_body(&assembler, &index_var);
- Node* map = assembler.LoadMap(search_element);
-
- assembler.Goto(&loop_body);
- assembler.Bind(&loop_body);
- assembler.GotoUnless(
- assembler.UintPtrLessThan(index_var.value(), len_var.value()),
- &return_not_found);
-
- Node* element_k =
- assembler.LoadFixedArrayElement(elements, index_var.value());
- assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
-
- Node* map_k = assembler.LoadMap(element_k);
- assembler.BranchIfSimd128Equal(search_element, map, element_k, map_k,
- &return_found, &continue_loop);
-
- assembler.Bind(&continue_loop);
- index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
- assembler.Goto(&loop_body);
- }
}
assembler.Bind(&if_packed_doubles);
@@ -2137,12 +2226,12 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
Label not_nan_loop(&assembler, &index_var), search_notnan(&assembler);
Variable search_num(&assembler, MachineRepresentation::kFloat64);
- assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
+ assembler.GotoIfNot(assembler.TaggedIsSmi(search_element), &search_notnan);
search_num.Bind(assembler.SmiToFloat64(search_element));
assembler.Goto(&not_nan_loop);
assembler.Bind(&search_notnan);
- assembler.GotoUnless(
+ assembler.GotoIfNot(
assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
&return_not_found);
@@ -2155,7 +2244,7 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
assembler.Bind(&not_nan_loop);
{
Label continue_loop(&assembler);
- assembler.GotoUnless(
+ assembler.GotoIfNot(
assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
Node* element_k = assembler.LoadFixedDoubleArrayElement(
@@ -2173,12 +2262,12 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
Label not_nan_loop(&assembler, &index_var), search_notnan(&assembler);
Variable search_num(&assembler, MachineRepresentation::kFloat64);
- assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
+ assembler.GotoIfNot(assembler.TaggedIsSmi(search_element), &search_notnan);
search_num.Bind(assembler.SmiToFloat64(search_element));
assembler.Goto(&not_nan_loop);
assembler.Bind(&search_notnan);
- assembler.GotoUnless(
+ assembler.GotoIfNot(
assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
&return_not_found);
@@ -2191,7 +2280,7 @@ void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
assembler.Bind(&not_nan_loop);
{
Label continue_loop(&assembler);
- assembler.GotoUnless(
+ assembler.GotoIfNot(
assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
@@ -2347,7 +2436,7 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
CSA_ASSERT(&assembler, assembler.TaggedIsSmi(length));
CSA_ASSERT(&assembler, assembler.TaggedIsSmi(index));
- assembler.GotoUnless(assembler.SmiBelow(index, length), &set_done);
+ assembler.GotoIfNot(assembler.SmiBelow(index, length), &set_done);
Node* one = assembler.SmiConstant(Smi::FromInt(1));
assembler.StoreObjectFieldNoWriteBarrier(iterator,
@@ -2552,7 +2641,7 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
CSA_ASSERT(&assembler, assembler.TaggedIsSmi(length));
CSA_ASSERT(&assembler, assembler.TaggedIsSmi(index));
- assembler.GotoUnless(assembler.SmiBelow(index, length), &set_done);
+ assembler.GotoIfNot(assembler.SmiBelow(index, length), &set_done);
Node* one = assembler.SmiConstant(1);
assembler.StoreObjectFieldNoWriteBarrier(
@@ -2744,19 +2833,17 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
assembler.Bind(&throw_bad_receiver);
{
// The {receiver} is not a valid JSArrayIterator.
- Node* result = assembler.CallRuntime(
- Runtime::kThrowIncompatibleMethodReceiver, context,
- assembler.HeapConstant(operation), iterator);
- assembler.Return(result);
+ assembler.CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+ assembler.HeapConstant(operation), iterator);
+ assembler.Unreachable();
}
assembler.Bind(&if_isdetached);
{
Node* message = assembler.SmiConstant(MessageTemplate::kDetachedOperation);
- Node* result =
- assembler.CallRuntime(Runtime::kThrowTypeError, context, message,
- assembler.HeapConstant(operation));
- assembler.Return(result);
+ assembler.CallRuntime(Runtime::kThrowTypeError, context, message,
+ assembler.HeapConstant(operation));
+ assembler.Unreachable();
}
}
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index ad367587b1..e82c3850af 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -2,8 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-async-function.cc b/deps/v8/src/builtins/builtins-async-function.cc
new file mode 100644
index 0000000000..309d481533
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-async-function.cc
@@ -0,0 +1,208 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-async.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler {
+ public:
+ explicit AsyncFunctionBuiltinsAssembler(CodeAssemblerState* state)
+ : AsyncBuiltinsAssembler(state) {}
+
+ protected:
+ void AsyncFunctionAwait(Node* const context, Node* const generator,
+ Node* const awaited, Node* const outer_promise,
+ const bool is_predicted_as_caught);
+
+ void AsyncFunctionAwaitResumeClosure(
+ Node* const context, Node* const sent_value,
+ JSGeneratorObject::ResumeMode resume_mode);
+};
+
+namespace {
+
+// Describe fields of Context associated with AsyncFunctionAwait resume
+// closures.
+// TODO(jgruber): Refactor to reuse code for upcoming async-generators.
+class AwaitContext {
+ public:
+ enum Fields { kGeneratorSlot = Context::MIN_CONTEXT_SLOTS, kLength };
+};
+
+} // anonymous namespace
+
+void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
+ Node* context, Node* sent_value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ DCHECK(resume_mode == JSGeneratorObject::kNext ||
+ resume_mode == JSGeneratorObject::kThrow);
+
+ Node* const generator =
+ LoadContextElement(context, AwaitContext::kGeneratorSlot);
+ CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
+
+ // Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
+ // unnecessary runtime checks removed.
+ // TODO(jgruber): Refactor to reuse code from builtins-generator.cc.
+
+ // Ensure that the generator is neither closed nor running.
+ CSA_SLOW_ASSERT(
+ this,
+ SmiGreaterThan(
+ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset),
+ SmiConstant(JSGeneratorObject::kGeneratorClosed)));
+
+  // Resume the {generator} using our trampoline.
+ Callable callable = CodeFactory::ResumeGenerator(isolate());
+ CallStub(callable, context, sent_value, generator, SmiConstant(resume_mode));
+
+ // The resulting Promise is a throwaway, so it doesn't matter what it
+ // resolves to. What is important is that we don't end up keeping the
+ // whole chain of intermediate Promises alive by returning the return value
+ // of ResumeGenerator, as that would create a memory leak.
+}
+
+TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 1);
+ Node* const sentError = Parameter(1);
+ Node* const context = Parameter(4);
+
+ AsyncFunctionAwaitResumeClosure(context, sentError,
+ JSGeneratorObject::kThrow);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 1);
+ Node* const sentValue = Parameter(1);
+ Node* const context = Parameter(4);
+
+ AsyncFunctionAwaitResumeClosure(context, sentValue, JSGeneratorObject::kNext);
+ Return(UndefinedConstant());
+}
+
+// ES#abstract-ops-async-function-await
+// AsyncFunctionAwait ( value )
+// Shared logic for the core of await. The parser desugars
+// await awaited
+// into
+// yield AsyncFunctionAwait{Caught,Uncaught}(.generator, awaited, .promise)
+// The 'awaited' parameter is the value; the generator stands in
+// for the asyncContext, and .promise is the larger promise under
+// construction by the enclosing async function.
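+//
+// Illustration (informal): in
+//   async function f() { try { await x; } catch (e) {} }
+// the `await x` desugars to a yield of AsyncFunctionAwaitCaught, whereas an
+// `await` with no locally surrounding try/catch goes through
+// AsyncFunctionAwaitUncaught (see the two builtins below).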
+void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
+ Node* const context, Node* const generator, Node* const awaited,
+ Node* const outer_promise, const bool is_predicted_as_caught) {
+ CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
+ CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
+
+ NodeGenerator1 create_closure_context = [&](Node* native_context) -> Node* {
+ Node* const context =
+ CreatePromiseContext(native_context, AwaitContext::kLength);
+ StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
+ generator);
+ return context;
+ };
+
+ // TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
+  // the awaited promise if it is already a promise. Reuse is non-spec
+  // compliant, but matching our old behavior gives us a couple of percent
+  // performance boost.
+ // TODO(jgruber): Use a faster specialized version of
+ // InternalPerformPromiseThen.
+
+ Node* const result = Await(
+ context, generator, awaited, outer_promise, create_closure_context,
+ Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
+ Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, is_predicted_as_caught);
+
+ Return(result);
+}
+
+// Called by the parser from the desugaring of 'await' when catch
+// prediction indicates that there is a locally surrounding catch block.
+TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 3);
+ Node* const generator = Parameter(1);
+ Node* const awaited = Parameter(2);
+ Node* const outer_promise = Parameter(3);
+ Node* const context = Parameter(6);
+
+ static const bool kIsPredictedAsCaught = true;
+
+ AsyncFunctionAwait(context, generator, awaited, outer_promise,
+ kIsPredictedAsCaught);
+}
+
+// Called by the parser from the desugaring of 'await' when catch
+// prediction indicates no locally surrounding catch block.
+TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 3);
+ Node* const generator = Parameter(1);
+ Node* const awaited = Parameter(2);
+ Node* const outer_promise = Parameter(3);
+ Node* const context = Parameter(6);
+
+ static const bool kIsPredictedAsCaught = false;
+
+ AsyncFunctionAwait(context, generator, awaited, outer_promise,
+ kIsPredictedAsCaught);
+}
+
+TF_BUILTIN(AsyncFunctionPromiseCreate, AsyncFunctionBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 0);
+ Node* const context = Parameter(3);
+
+ Node* const promise = AllocateAndInitJSPromise(context);
+
+ Label if_is_debug_active(this, Label::kDeferred);
+ GotoIf(IsDebugActive(), &if_is_debug_active);
+
+ // Early exit if debug is not active.
+ Return(promise);
+
+ Bind(&if_is_debug_active);
+ {
+ // Push the Promise under construction in an async function on
+ // the catch prediction stack to handle exceptions thrown before
+ // the first await.
+ // Assign ID and create a recurring task to save stack for future
+ // resumptions from await.
+ CallRuntime(Runtime::kDebugAsyncFunctionPromiseCreated, context, promise);
+ Return(promise);
+ }
+}
+
+TF_BUILTIN(AsyncFunctionPromiseRelease, AsyncFunctionBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 1);
+ Node* const promise = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Label if_is_debug_active(this, Label::kDeferred);
+ GotoIf(IsDebugActive(), &if_is_debug_active);
+
+ // Early exit if debug is not active.
+ Return(UndefinedConstant());
+
+ Bind(&if_is_debug_active);
+ {
+    // Pop the Promise under construction in an async function from the
+    // catch prediction stack.
+ CallRuntime(Runtime::kDebugPopPromise, context);
+ Return(promise);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-async-iterator.cc b/deps/v8/src/builtins/builtins-async-iterator.cc
new file mode 100644
index 0000000000..13d15ef2c6
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-async-iterator.cc
@@ -0,0 +1,326 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-async.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// Describe fields of Context associated with the AsyncIterator unwrap closure.
+class ValueUnwrapContext {
+ public:
+ enum Fields { kDoneSlot = Context::MIN_CONTEXT_SLOTS, kLength };
+};
+
+class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
+ public:
+ explicit AsyncFromSyncBuiltinsAssembler(CodeAssemblerState* state)
+ : AsyncBuiltinsAssembler(state) {}
+
+ void ThrowIfNotAsyncFromSyncIterator(Node* const context, Node* const object,
+ Label* if_exception,
+ Variable* var_exception,
+ const char* method_name);
+
+ typedef std::function<void(Node* const context, Node* const promise,
+ Label* if_exception)>
+ UndefinedMethodHandler;
+ void Generate_AsyncFromSyncIteratorMethod(
+ Node* const context, Node* const iterator, Node* const sent_value,
+ Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
+ const char* operation_name,
+ Label::Type reject_label_type = Label::kDeferred,
+ Node* const initial_exception_value = nullptr);
+
+ Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context,
+ Node* done);
+
+ // Load "value" and "done" from an iterator result object. If an exception
+  // is thrown at any point, jumps to the `if_exception` label with the
+  // exception stored in `var_exception`.
+ //
+ // Returns a Pair of Nodes, whose first element is the value of the "value"
+ // property, and whose second element is the value of the "done" property,
+ // converted to a Boolean if needed.
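+  // For example (illustrative): for a result object { value: 1, done: 0 },
+  // the pair (1, false) is produced, `done` being coerced via ToBoolean.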
+ std::pair<Node*, Node*> LoadIteratorResult(Node* const context,
+ Node* const native_context,
+ Node* const iter_result,
+ Label* if_exception,
+ Variable* var_exception);
+
+ Node* CreateUnwrapClosure(Node* const native_context, Node* const done);
+};
+
+void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
+ Node* const context, Node* const object, Label* if_exception,
+ Variable* var_exception, const char* method_name) {
+ Label if_receiverisincompatible(this, Label::kDeferred), done(this);
+
+ GotoIf(TaggedIsSmi(object), &if_receiverisincompatible);
+ Branch(HasInstanceType(object, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE), &done,
+ &if_receiverisincompatible);
+
+ Bind(&if_receiverisincompatible);
+ {
+ // If Type(O) is not Object, or if O does not have a [[SyncIterator]]
+ // internal slot, then
+
+ // Let badIteratorError be a new TypeError exception.
+ Node* const error =
+ MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
+ CStringConstant(method_name), object);
+
+ // Perform ! Call(promiseCapability.[[Reject]], undefined,
+ // « badIteratorError »).
+ var_exception->Bind(error);
+ Goto(if_exception);
+ }
+
+ Bind(&done);
+}
+
+void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
+ Node* const context, Node* const iterator, Node* const sent_value,
+ Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
+ const char* operation_name, Label::Type reject_label_type,
+ Node* const initial_exception_value) {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise = AllocateAndInitJSPromise(context);
+
+ Variable var_exception(this, MachineRepresentation::kTagged,
+ initial_exception_value == nullptr
+ ? UndefinedConstant()
+ : initial_exception_value);
+ Label reject_promise(this, reject_label_type);
+
+ ThrowIfNotAsyncFromSyncIterator(context, iterator, &reject_promise,
+ &var_exception, operation_name);
+
+ Node* const sync_iterator =
+ LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
+
+ Node* const method = GetProperty(context, sync_iterator, method_name);
+
+ if (if_method_undefined) {
+ Label if_isnotundefined(this);
+
+ GotoIfNot(IsUndefined(method), &if_isnotundefined);
+ if_method_undefined(native_context, promise, &reject_promise);
+
+ Bind(&if_isnotundefined);
+ }
+
+ Node* const iter_result = CallJS(CodeFactory::Call(isolate()), context,
+ method, sync_iterator, sent_value);
+ GotoIfException(iter_result, &reject_promise, &var_exception);
+
+ Node* value;
+ Node* done;
+ std::tie(value, done) = LoadIteratorResult(
+ context, native_context, iter_result, &reject_promise, &var_exception);
+ Node* const wrapper = AllocateAndInitJSPromise(context);
+
+  // Perform ! Call(valueWrapperCapability.[[Resolve]], undefined,
+  // « value »).
+ InternalResolvePromise(context, wrapper, value);
+
+ // Let onFulfilled be a new built-in function object as defined in
+ // Async Iterator Value Unwrap Functions.
+  // Set onFulfilled.[[Done]] to the iterator result's done value.
+ Node* const on_fulfilled = CreateUnwrapClosure(native_context, done);
+
+ // Perform ! PerformPromiseThen(valueWrapperCapability.[[Promise]],
+ // onFulfilled, undefined, promiseCapability).
+ Node* const undefined = UndefinedConstant();
+ InternalPerformPromiseThen(context, wrapper, on_fulfilled, undefined, promise,
+ undefined, undefined);
+ Return(promise);
+
+ Bind(&reject_promise);
+ {
+ Node* const exception = var_exception.value();
+ InternalPromiseReject(context, promise, exception, TrueConstant());
+
+ Return(promise);
+ }
+}
+
+std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
+ Node* const context, Node* const native_context, Node* const iter_result,
+ Label* if_exception, Variable* var_exception) {
+ Label if_fastpath(this), if_slowpath(this), merge(this), to_boolean(this),
+ done(this), if_notanobject(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(iter_result), &if_notanobject);
+
+ Node* const iter_result_map = LoadMap(iter_result);
+ GotoIfNot(IsJSReceiverMap(iter_result_map), &if_notanobject);
+
+ Node* const fast_iter_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ Variable var_value(this, MachineRepresentation::kTagged);
+ Variable var_done(this, MachineRepresentation::kTagged);
+ Branch(WordEqual(iter_result_map, fast_iter_result_map), &if_fastpath,
+ &if_slowpath);
+
+ Bind(&if_fastpath);
+ {
+ var_value.Bind(
+ LoadObjectField(iter_result, JSIteratorResult::kValueOffset));
+ var_done.Bind(LoadObjectField(iter_result, JSIteratorResult::kDoneOffset));
+ Goto(&merge);
+ }
+
+ Bind(&if_slowpath);
+ {
+ // Let nextValue be IteratorValue(nextResult).
+ // IfAbruptRejectPromise(nextValue, promiseCapability).
+ Node* const value =
+ GetProperty(context, iter_result, factory()->value_string());
+ GotoIfException(value, if_exception, var_exception);
+
+ // Let nextDone be IteratorComplete(nextResult).
+ // IfAbruptRejectPromise(nextDone, promiseCapability).
+ Node* const done =
+ GetProperty(context, iter_result, factory()->done_string());
+ GotoIfException(done, if_exception, var_exception);
+
+ var_value.Bind(value);
+ var_done.Bind(done);
+ Goto(&merge);
+ }
+
+ Bind(&if_notanobject);
+ {
+    // The sync iterator result is not an object; produce a TypeError and
+    // jump to the `if_exception` path.
+ Node* const error = MakeTypeError(
+ MessageTemplate::kIteratorResultNotAnObject, context, iter_result);
+ var_exception->Bind(error);
+ Goto(if_exception);
+ }
+
+ Bind(&merge);
+ // Ensure `iterResult.done` is a Boolean.
+ GotoIf(TaggedIsSmi(var_done.value()), &to_boolean);
+ Branch(IsBoolean(var_done.value()), &done, &to_boolean);
+
+ Bind(&to_boolean);
+ {
+ Node* const result =
+ CallStub(CodeFactory::ToBoolean(isolate()), context, var_done.value());
+ var_done.Bind(result);
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return std::make_pair(var_value.value(), var_done.value());
+}
+
+Node* AsyncFromSyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
+ Node* done) {
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const on_fulfilled_shared = LoadContextElement(
+ native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN);
+ CSA_ASSERT(this,
+ HasInstanceType(on_fulfilled_shared, SHARED_FUNCTION_INFO_TYPE));
+ Node* const closure_context =
+ AllocateAsyncIteratorValueUnwrapContext(native_context, done);
+ return AllocateFunctionWithMapAndContext(map, on_fulfilled_shared,
+ closure_context);
+}
+
+Node* AsyncFromSyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
+ Node* native_context, Node* done) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_ASSERT(this, IsBoolean(done));
+
+ Node* const context =
+ CreatePromiseContext(native_context, ValueUnwrapContext::kLength);
+ StoreContextElementNoWriteBarrier(context, ValueUnwrapContext::kDoneSlot,
+ done);
+ return context;
+}
+} // namespace
+
+// https://tc39.github.io/proposal-async-iteration/
+// Section #sec-%asyncfromsynciteratorprototype%.next
+TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
+ Node* const iterator = Parameter(0);
+ Node* const value = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Generate_AsyncFromSyncIteratorMethod(
+ context, iterator, value, factory()->next_string(),
+ UndefinedMethodHandler(), "[Async-from-Sync Iterator].prototype.next");
+}
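+// Informal usage sketch (not part of this change): this wrapping is what
+// lets for-await-of consume plain sync iterables, e.g.
+//
+//   async function drain(iterable) {
+//     for await (const x of iterable) console.log(x);
+//   }
+//   drain([1, 2, 3]);  // logs 1, 2, 3; each step goes through next() above.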
+
+// https://tc39.github.io/proposal-async-iteration/
+// Section #sec-%asyncfromsynciteratorprototype%.return
+TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
+ AsyncFromSyncBuiltinsAssembler) {
+ Node* const iterator = Parameter(0);
+ Node* const value = Parameter(1);
+ Node* const context = Parameter(4);
+
+ auto if_return_undefined = [=](Node* const native_context,
+ Node* const promise, Label* if_exception) {
+ // If return is undefined, then
+ // Let iterResult be ! CreateIterResultObject(value, true)
+ Node* const iter_result =
+ CallStub(CodeFactory::CreateIterResultObject(isolate()), context, value,
+ TrueConstant());
+
+ // Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
+ // IfAbruptRejectPromise(nextDone, promiseCapability).
+ // Return promiseCapability.[[Promise]].
+ PromiseFulfill(context, promise, iter_result, v8::Promise::kFulfilled);
+ Return(promise);
+ };
+
+ Generate_AsyncFromSyncIteratorMethod(
+ context, iterator, value, factory()->return_string(), if_return_undefined,
+ "[Async-from-Sync Iterator].prototype.return");
+}
+
+// https://tc39.github.io/proposal-async-iteration/
+// Section #sec-%asyncfromsynciteratorprototype%.throw
+TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
+ AsyncFromSyncBuiltinsAssembler) {
+ Node* const iterator = Parameter(0);
+ Node* const reason = Parameter(1);
+ Node* const context = Parameter(4);
+
+ auto if_throw_undefined = [=](Node* const native_context, Node* const promise,
+ Label* if_exception) { Goto(if_exception); };
+
+ Generate_AsyncFromSyncIteratorMethod(
+ context, iterator, reason, factory()->throw_string(), if_throw_undefined,
+ "[Async-from-Sync Iterator].prototype.throw", Label::kNonDeferred,
+ reason);
+}
+
+TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncFromSyncBuiltinsAssembler) {
+ Node* const value = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Node* const done = LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
+ CSA_ASSERT(this, IsBoolean(done));
+
+ Node* const unwrapped_value = CallStub(
+ CodeFactory::CreateIterResultObject(isolate()), context, value, done);
+
+ Return(unwrapped_value);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-async.cc b/deps/v8/src/builtins/builtins-async.cc
new file mode 100644
index 0000000000..4c64637671
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-async.cc
@@ -0,0 +1,92 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-async.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Node* AsyncBuiltinsAssembler::Await(
+ Node* context, Node* generator, Node* value, Node* outer_promise,
+ const NodeGenerator1& create_closure_context, int on_resolve_context_index,
+ int on_reject_context_index, bool is_predicted_as_caught) {
+ // Let promiseCapability be ! NewPromiseCapability(%Promise%).
+ Node* const wrapped_value = AllocateAndInitJSPromise(context);
+
+ // Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
+ InternalResolvePromise(context, wrapped_value, value);
+
+ Node* const native_context = LoadNativeContext(context);
+
+ Node* const closure_context = create_closure_context(native_context);
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+
+ // Load and allocate on_resolve closure
+ Node* const on_resolve_shared_fun =
+ LoadContextElement(native_context, on_resolve_context_index);
+ CSA_SLOW_ASSERT(
+ this, HasInstanceType(on_resolve_shared_fun, SHARED_FUNCTION_INFO_TYPE));
+ Node* const on_resolve = AllocateFunctionWithMapAndContext(
+ map, on_resolve_shared_fun, closure_context);
+
+ // Load and allocate on_reject closure
+ Node* const on_reject_shared_fun =
+ LoadContextElement(native_context, on_reject_context_index);
+ CSA_SLOW_ASSERT(
+ this, HasInstanceType(on_reject_shared_fun, SHARED_FUNCTION_INFO_TYPE));
+ Node* const on_reject = AllocateFunctionWithMapAndContext(
+ map, on_reject_shared_fun, closure_context);
+
+ Node* const throwaway_promise =
+ AllocateAndInitJSPromise(context, wrapped_value);
+
+  // The Promise will be thrown away and not handled, but it shouldn't
+  // trigger unhandled reject events, as its work is done.
+ PromiseSetHasHandler(throwaway_promise);
+
+ Label do_perform_promise_then(this);
+ GotoIfNot(IsDebugActive(), &do_perform_promise_then);
+ {
+ Label common(this);
+ GotoIf(TaggedIsSmi(value), &common);
+ GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &common);
+ {
+ // Mark the reject handler callback to be a forwarding edge, rather
+ // than a meaningful catch handler
+ Node* const key =
+ HeapConstant(factory()->promise_forwarding_handler_symbol());
+ CallRuntime(Runtime::kSetProperty, context, on_reject, key,
+ TrueConstant(), SmiConstant(STRICT));
+
+ if (is_predicted_as_caught) PromiseSetHandledHint(value);
+ }
+
+ Goto(&common);
+ Bind(&common);
+ // Mark the dependency to outer Promise in case the throwaway Promise is
+ // found on the Promise stack
+ CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
+
+ Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
+ CallRuntime(Runtime::kSetProperty, context, throwaway_promise, key,
+ outer_promise, SmiConstant(STRICT));
+ }
+
+ Goto(&do_perform_promise_then);
+ Bind(&do_perform_promise_then);
+ InternalPerformPromiseThen(context, wrapped_value, on_resolve, on_reject,
+ throwaway_promise, UndefinedConstant(),
+ UndefinedConstant());
+
+ return wrapped_value;
+}
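+// Informal sketch of the above (resumeGenerator / throwIntoGenerator are
+// hypothetical stand-ins for the resolve / reject closures allocated here):
+//
+//   const wrapped = new Promise(res => res(value));  // fresh, not reused
+//   wrapped.then(v => resumeGenerator(generator, v),
+//                e => throwIntoGenerator(generator, e));
+//   return wrapped;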
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-async.h b/deps/v8/src/builtins/builtins-async.h
new file mode 100644
index 0000000000..9f5df6e04a
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-async.h
@@ -0,0 +1,35 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_ASYNC_H_
+#define V8_BUILTINS_BUILTINS_ASYNC_H_
+
+#include "src/builtins/builtins-promise.h"
+
+namespace v8 {
+namespace internal {
+
+class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
+ public:
+ explicit AsyncBuiltinsAssembler(CodeAssemblerState* state)
+ : PromiseBuiltinsAssembler(state) {}
+
+ protected:
+ typedef std::function<Node*(Node*)> NodeGenerator1;
+
+  // Perform steps to resume the generator after `value` is resolved.
+  // `on_reject_context_index` is an index into the Native Context; it should
+  // point to a SharedFunctionInfo instance used to create the closure. The
+  // value following the reject index should be a similar value for the
+  // resolve closure. Returns the Promise-wrapped `value`.
+ Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
+ const NodeGenerator1& create_closure_context,
+ int on_resolve_context_index, int on_reject_context_index,
+ bool is_predicted_as_caught);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_ASYNC_H_
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
index 81232230ff..65bdb031b3 100644
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -5,6 +5,8 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-call.cc b/deps/v8/src/builtins/builtins-call.cc
index e3054a9913..40ef3f0430 100644
--- a/deps/v8/src/builtins/builtins-call.cc
+++ b/deps/v8/src/builtins/builtins-call.cc
@@ -2,8 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/isolate.h"
+#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -147,5 +150,14 @@ void Builtins::Generate_TailCall_ReceiverIsAny(MacroAssembler* masm) {
Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
}
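+// Descriptive note: the two wrappers below simply forward the caller's
+// variable argument list on to the generic Call / CallFunction builtins.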
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
+ Generate_CallForwardVarargs(masm, masm->isolate()->builtins()->Call());
+}
+
+void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
+ Generate_CallForwardVarargs(masm,
+ masm->isolate()->builtins()->CallFunction());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index ae9c76dc05..37da78f457 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -5,6 +5,8 @@
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
#include "src/string-builder.h"
#include "src/wasm/wasm-module.h"
diff --git a/deps/v8/src/builtins/builtins-constructor.cc b/deps/v8/src/builtins/builtins-constructor.cc
index db3ffb0b91..ec79e4b1ee 100644
--- a/deps/v8/src/builtins/builtins-constructor.cc
+++ b/deps/v8/src/builtins/builtins-constructor.cc
@@ -8,7 +8,9 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/counters.h"
#include "src/interface-descriptors.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -47,7 +49,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
Node* is_not_normal =
Word32And(compiler_hints,
Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
- GotoUnless(is_not_normal, &if_normal);
+ GotoIfNot(is_not_normal, &if_normal);
Node* is_generator = Word32And(
compiler_hints, Int32Constant(FunctionKind::kGeneratorFunction
@@ -120,13 +122,34 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
// Initialize the rest of the function.
Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
- Node* empty_literals_array = HeapConstant(factory->empty_literals_array());
StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
empty_fixed_array);
StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
empty_fixed_array);
- StoreObjectFieldNoWriteBarrier(result, JSFunction::kLiteralsOffset,
- empty_literals_array);
+ Node* literals_cell = LoadFixedArrayElement(
+ feedback_vector, slot, 0, CodeStubAssembler::SMI_PARAMETERS);
+ {
+ // Bump the closure counter encoded in the cell's map.
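+    // The map encodes a saturating count of closures created for this
+    // feedback slot: NoClosures -> OneClosure -> ManyClosures.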
+ Node* cell_map = LoadMap(literals_cell);
+ Label no_closures(this), one_closure(this), cell_done(this);
+
+ GotoIf(IsNoClosuresCellMap(cell_map), &no_closures);
+ GotoIf(IsOneClosureCellMap(cell_map), &one_closure);
+ CSA_ASSERT(this, IsManyClosuresCellMap(cell_map));
+ Goto(&cell_done);
+
+ Bind(&no_closures);
+ StoreMapNoWriteBarrier(literals_cell, Heap::kOneClosureCellMapRootIndex);
+ Goto(&cell_done);
+
+ Bind(&one_closure);
+ StoreMapNoWriteBarrier(literals_cell, Heap::kManyClosuresCellMapRootIndex);
+ Goto(&cell_done);
+
+ Bind(&cell_done);
+ }
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
+ literals_cell);
StoreObjectFieldNoWriteBarrier(
result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
@@ -400,11 +423,10 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
Variable result(this, MachineRepresentation::kTagged);
- Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* boilerplate =
- LoadFixedArrayElement(literals_array, literal_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
+ Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
+ Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
+ Node* boilerplate = LoadFixedArrayElement(feedback_vector, literal_index, 0,
+ CodeStubAssembler::SMI_PARAMETERS);
GotoIf(IsUndefined(boilerplate), &call_runtime);
{
@@ -484,17 +506,14 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
return_result(this);
Variable result(this, MachineRepresentation::kTagged);
- Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* allocation_site =
- LoadFixedArrayElement(literals_array, literal_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
+ Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
+ Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
+ Node* allocation_site = LoadFixedArrayElement(
+ feedback_vector, literal_index, 0, CodeStubAssembler::SMI_PARAMETERS);
GotoIf(IsUndefined(allocation_site), call_runtime);
- allocation_site =
- LoadFixedArrayElement(literals_array, literal_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
+ allocation_site = LoadFixedArrayElement(feedback_vector, literal_index, 0,
+ CodeStubAssembler::SMI_PARAMETERS);
Node* boilerplate =
LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
@@ -645,11 +664,10 @@ int ConstructorBuiltinsAssembler::FastCloneShallowObjectPropertiesCount(
Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
CodeAssemblerLabel* call_runtime, Node* closure, Node* literals_index,
Node* properties_count) {
- Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* allocation_site =
- LoadFixedArrayElement(literals_array, literals_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
+ Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
+ Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
+ Node* allocation_site = LoadFixedArrayElement(
+ feedback_vector, literals_index, 0, CodeStubAssembler::SMI_PARAMETERS);
GotoIf(IsUndefined(allocation_site), call_runtime);
// Calculate the object and allocation size based on the properties count.
@@ -665,7 +683,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
Node* boilerplate_map = LoadMap(boilerplate);
Node* instance_size = LoadMapInstanceSize(boilerplate_map);
Node* size_in_words = WordShr(object_size, kPointerSizeLog2);
- GotoUnless(WordEqual(instance_size, size_in_words), call_runtime);
+ GotoIfNot(WordEqual(instance_size, size_in_words), call_runtime);
Node* copy = Allocate(allocation_size);
@@ -689,8 +707,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
Bind(&loop_check);
{
offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
- GotoUnless(IntPtrGreaterThanOrEqual(offset.value(), end_offset),
- &loop_body);
+ GotoIfNot(IntPtrGreaterThanOrEqual(offset.value(), end_offset), &loop_body);
}
if (FLAG_allocation_site_pretenuring) {
diff --git a/deps/v8/src/builtins/builtins-conversion.cc b/deps/v8/src/builtins/builtins-conversion.cc
index 177b739c4b..2aff1c5f07 100644
--- a/deps/v8/src/builtins/builtins-conversion.cc
+++ b/deps/v8/src/builtins/builtins-conversion.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -24,6 +25,7 @@ Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
}
namespace {
+
// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] )
void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
ToPrimitiveHint hint) {
@@ -52,7 +54,8 @@ void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
{
// Invoke the {exotic_to_prim} method on the {input} with a string
// representation of the {hint}.
- Callable callable = CodeFactory::Call(assembler->isolate());
+ Callable callable = CodeFactory::Call(
+ assembler->isolate(), ConvertReceiverMode::kNotNullOrUndefined);
Node* hint_string = assembler->HeapConstant(
assembler->factory()->ToPrimitiveHintString(hint));
Node* result = assembler->CallJS(callable, context, exotic_to_prim, input,
@@ -93,7 +96,8 @@ void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
assembler->TailCallStub(callable, context, input);
}
}
-} // anonymous namespace
+
+} // namespace
void Builtins::Generate_NonPrimitiveToPrimitive_Default(
compiler::CodeAssemblerState* state) {
@@ -177,16 +181,15 @@ void Builtins::Generate_ToString(compiler::CodeAssemblerState* state) {
Node* input_instance_type = assembler.LoadMapInstanceType(input_map);
Label not_string(&assembler);
- assembler.GotoUnless(assembler.IsStringInstanceType(input_instance_type),
- &not_string);
+ assembler.GotoIfNot(assembler.IsStringInstanceType(input_instance_type),
+ &not_string);
assembler.Return(input);
Label not_heap_number(&assembler);
assembler.Bind(&not_string);
{
- assembler.GotoUnless(assembler.IsHeapNumberMap(input_map),
- &not_heap_number);
+ assembler.GotoIfNot(assembler.IsHeapNumberMap(input_map), &not_heap_number);
assembler.Goto(&is_number);
}
@@ -221,6 +224,7 @@ Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
}
namespace {
+
// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
OrdinaryToPrimitiveHint hint) {
@@ -263,7 +267,8 @@ void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
assembler->Bind(&if_methodiscallable);
{
// Call the {method} on the {input}.
- Callable callable = CodeFactory::Call(assembler->isolate());
+ Callable callable = CodeFactory::Call(
+ assembler->isolate(), ConvertReceiverMode::kNotNullOrUndefined);
Node* result = assembler->CallJS(callable, context, method, input);
var_result.Bind(result);
@@ -287,7 +292,8 @@ void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
assembler->Bind(&return_result);
assembler->Return(var_result.value());
}
-} // anonymous namespace
+
+} // namespace
void Builtins::Generate_OrdinaryToPrimitive_Number(
compiler::CodeAssemblerState* state) {
@@ -361,9 +367,9 @@ void Builtins::Generate_ToLength(compiler::CodeAssemblerState* state) {
Node* len_value = assembler.LoadHeapNumberValue(len);
// Check if {len} is not greater than zero.
- assembler.GotoUnless(assembler.Float64GreaterThan(
- len_value, assembler.Float64Constant(0.0)),
- &return_zero);
+ assembler.GotoIfNot(assembler.Float64GreaterThan(
+ len_value, assembler.Float64Constant(0.0)),
+ &return_zero);
// Check if {len} is greater than or equal to 2^53-1.
assembler.GotoIf(
@@ -474,6 +480,17 @@ void Builtins::Generate_ToObject(compiler::CodeAssemblerState* state) {
assembler.Return(object);
}
+// Deprecated ES5 [[Class]] internal property (used to implement %_ClassOf).
+void Builtins::Generate_ClassOf(compiler::CodeAssemblerState* state) {
+ typedef compiler::Node Node;
+ typedef TypeofDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
+
+ Node* object = assembler.Parameter(Descriptor::kObject);
+
+ assembler.Return(assembler.ClassOf(object));
+}
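+// For example (illustrative; %_ClassOf is a V8 intrinsic, available to
+// native scripts or with --allow-natives-syntax):
+//   %_ClassOf([]) === 'Array';  %_ClassOf(new Date()) === 'Date';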
+
// ES6 section 12.5.5 typeof operator
void Builtins::Generate_Typeof(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 45a5fd91a8..131749ccaa 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -2,8 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -42,8 +47,7 @@ BUILTIN(DataViewConstructor_ConstructStub) {
Handle<Object> offset;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, offset,
- Object::ToIndex(isolate, byte_offset,
- MessageTemplate::kInvalidDataViewOffset));
+ Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
// 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
// We currently violate the specification at this point.
@@ -55,8 +59,7 @@ BUILTIN(DataViewConstructor_ConstructStub) {
// 7. If offset > bufferByteLength, throw a RangeError exception
if (offset->Number() > buffer_byte_length) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewRangeError(MessageTemplate::kInvalidDataViewOffset, offset));
+ isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset));
}
Handle<Object> view_byte_length;
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index df74321093..1bc1dfa036 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -6,7 +6,10 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/conversions.h"
+#include "src/counters.h"
#include "src/dateparser-inl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -302,8 +305,8 @@ BUILTIN(DateUTC) {
HandleScope scope(isolate);
int const argc = args.length() - 1;
double year = std::numeric_limits<double>::quiet_NaN();
- double month = std::numeric_limits<double>::quiet_NaN();
- double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+ double month = 0.0, date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0,
+ ms = 0.0;
if (argc >= 1) {
Handle<Object> year_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
@@ -945,8 +948,8 @@ void Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
// Raise a TypeError if the receiver is not a date.
assembler->Bind(&receiver_not_date);
{
- Node* result = assembler->CallRuntime(Runtime::kThrowNotDateError, context);
- assembler->Return(result);
+ assembler->CallRuntime(Runtime::kThrowNotDateError, context);
+ assembler->Unreachable();
}
}
@@ -1099,7 +1102,7 @@ void Builtins::Generate_DatePrototypeToPrimitive(
// Check if the {receiver} is actually a JSReceiver.
Label receiver_is_invalid(&assembler, Label::kDeferred);
assembler.GotoIf(assembler.TaggedIsSmi(receiver), &receiver_is_invalid);
- assembler.GotoUnless(assembler.IsJSReceiver(receiver), &receiver_is_invalid);
+ assembler.GotoIfNot(assembler.IsJSReceiver(receiver), &receiver_is_invalid);
// Dispatch to the appropriate OrdinaryToPrimitive builtin.
Label hint_is_number(&assembler), hint_is_string(&assembler),
@@ -1116,7 +1119,7 @@ void Builtins::Generate_DatePrototypeToPrimitive(
// Slow-case with actual string comparisons.
Callable string_equal = CodeFactory::StringEqual(assembler.isolate());
assembler.GotoIf(assembler.TaggedIsSmi(hint), &hint_is_invalid);
- assembler.GotoUnless(assembler.IsString(hint), &hint_is_invalid);
+ assembler.GotoIfNot(assembler.IsString(hint), &hint_is_invalid);
assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
hint, number_string),
assembler.TrueConstant()),
@@ -1152,20 +1155,19 @@ void Builtins::Generate_DatePrototypeToPrimitive(
// Raise a TypeError if the {hint} is invalid.
assembler.Bind(&hint_is_invalid);
{
- Node* result =
- assembler.CallRuntime(Runtime::kThrowInvalidHint, context, hint);
- assembler.Return(result);
+ assembler.CallRuntime(Runtime::kThrowInvalidHint, context, hint);
+ assembler.Unreachable();
}
// Raise a TypeError if the {receiver} is not a JSReceiver instance.
assembler.Bind(&receiver_is_invalid);
{
- Node* result = assembler.CallRuntime(
+ assembler.CallRuntime(
Runtime::kThrowIncompatibleMethodReceiver, context,
assembler.HeapConstant(assembler.factory()->NewStringFromAsciiChecked(
"Date.prototype [ @@toPrimitive ]", TENURED)),
receiver);
- assembler.Return(result);
+ assembler.Unreachable();
}
}
diff --git a/deps/v8/src/builtins/builtins-debug.cc b/deps/v8/src/builtins/builtins-debug.cc
index 011eba3db4..de603287f2 100644
--- a/deps/v8/src/builtins/builtins-debug.cc
+++ b/deps/v8/src/builtins/builtins-debug.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
#include "src/debug/debug.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -19,8 +20,12 @@ void Builtins::Generate_Slot_DebugBreak(MacroAssembler* masm) {
DebugCodegen::IGNORE_RESULT_REGISTER);
}
-void Builtins::Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
- DebugCodegen::GenerateFrameDropperLiveEdit(masm);
+void Builtins::Generate_FrameDropperTrampoline(MacroAssembler* masm) {
+ DebugCodegen::GenerateFrameDropperTrampoline(masm);
+}
+
+void Builtins::Generate_HandleDebuggerStatement(MacroAssembler* masm) {
+ DebugCodegen::GenerateHandleDebuggerStatement(masm);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 24ae56bd06..5b28863364 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -5,7 +5,9 @@
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/counters.h"
#include "src/messages.h"
+#include "src/objects-inl.h"
#include "src/property-descriptor.h"
#include "src/string-builder.h"
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 818e09a722..e58cad30d8 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -7,6 +7,10 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/compiler.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
#include "src/string-builder.h"
namespace v8 {
@@ -32,11 +36,16 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
// Build the source string.
Handle<String> source;
+ int parameters_end_pos = kNoSourcePosition;
{
IncrementalStringBuilder builder(isolate);
builder.AppendCharacter('(');
builder.AppendCString(token);
- builder.AppendCharacter('(');
+ if (FLAG_harmony_function_tostring) {
+ builder.AppendCString(" anonymous(");
+ } else {
+ builder.AppendCharacter('(');
+ }
bool parenthesis_in_arg_string = false;
if (argc > 1) {
for (int i = 1; i < argc; ++i) {
@@ -46,22 +55,30 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
isolate, param, Object::ToString(isolate, args.at(i)), Object);
param = String::Flatten(param);
builder.AppendString(param);
- // If the formal parameters string include ) - an illegal
- // character - it may make the combined function expression
- // compile. We avoid this problem by checking for this early on.
- DisallowHeapAllocation no_gc; // Ensure vectors stay valid.
- String::FlatContent param_content = param->GetFlatContent();
- for (int i = 0, length = param->length(); i < length; ++i) {
- if (param_content.Get(i) == ')') {
- parenthesis_in_arg_string = true;
- break;
+ if (!FLAG_harmony_function_tostring) {
+        // If the formal parameters string includes ')' - an illegal
+        // character - it may make the combined function expression
+        // compile. We avoid this problem by checking for it early on.
+ DisallowHeapAllocation no_gc; // Ensure vectors stay valid.
+ String::FlatContent param_content = param->GetFlatContent();
+ for (int i = 0, length = param->length(); i < length; ++i) {
+ if (param_content.Get(i) == ')') {
+ parenthesis_in_arg_string = true;
+ break;
+ }
}
}
}
- // If the formal parameters include an unbalanced block comment, the
- // function must be rejected. Since JavaScript does not allow nested
- // comments we can include a trailing block comment to catch this.
- builder.AppendCString("\n/*``*/");
+ if (!FLAG_harmony_function_tostring) {
+ // If the formal parameters include an unbalanced block comment, the
+ // function must be rejected. Since JavaScript does not allow nested
+ // comments we can include a trailing block comment to catch this.
+ builder.AppendCString("\n/*``*/");
+ }
+ }
+ if (FLAG_harmony_function_tostring) {
+ builder.AppendCharacter('\n');
+ parameters_end_pos = builder.Length();
}
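+  // Sketch (illustrative, with token "function" and parameters "p1,p2"):
+  // the source assembled here ends up as
+  //   (function anonymous(p1,p2
+  //   ) {
+  //   <body>
+  //   })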
builder.AppendCString(") {\n");
if (argc > 0) {
@@ -86,11 +103,12 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
// come from here.
Handle<JSFunction> function;
{
- ASSIGN_RETURN_ON_EXCEPTION(isolate, function,
- Compiler::GetFunctionFromString(
- handle(target->native_context(), isolate),
- source, ONLY_SINGLE_FUNCTION_LITERAL),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, function,
+ Compiler::GetFunctionFromString(
+ handle(target->native_context(), isolate), source,
+ ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos),
+ Object);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
@@ -267,7 +285,7 @@ void Builtins::Generate_FastFunctionPrototypeBind(
Node* context = assembler.Parameter(BuiltinDescriptor::kContext);
Node* new_target = assembler.Parameter(BuiltinDescriptor::kNewTarget);
- CodeStubArguments args(&assembler, argc);
+ CodeStubArguments args(&assembler, assembler.ChangeInt32ToIntPtr(argc));
// Check that receiver has instance type of JS_FUNCTION_TYPE
Node* receiver = args.GetReceiver();
@@ -311,7 +329,7 @@ void Builtins::Generate_FastFunctionPrototypeBind(
descriptors, DescriptorArray::ToValueIndex(length_index));
assembler.GotoIf(assembler.TaggedIsSmi(maybe_length_accessor), &slow);
Node* length_value_map = assembler.LoadMap(maybe_length_accessor);
- assembler.GotoUnless(assembler.IsAccessorInfoMap(length_value_map), &slow);
+ assembler.GotoIfNot(assembler.IsAccessorInfoMap(length_value_map), &slow);
const int name_index = JSFunction::kNameDescriptorIndex;
Node* maybe_name = assembler.LoadFixedArrayElement(
@@ -325,7 +343,7 @@ void Builtins::Generate_FastFunctionPrototypeBind(
descriptors, DescriptorArray::ToValueIndex(name_index));
assembler.GotoIf(assembler.TaggedIsSmi(maybe_name_accessor), &slow);
Node* name_value_map = assembler.LoadMap(maybe_name_accessor);
- assembler.GotoUnless(assembler.IsAccessorInfoMap(name_value_map), &slow);
+ assembler.GotoIfNot(assembler.IsAccessorInfoMap(name_value_map), &slow);
// Choose the right bound function map based on whether the target is
// constructable.
diff --git a/deps/v8/src/builtins/builtins-generator.cc b/deps/v8/src/builtins/builtins-generator.cc
index d22c3cdd64..14a11edff3 100644
--- a/deps/v8/src/builtins/builtins-generator.cc
+++ b/deps/v8/src/builtins/builtins-generator.cc
@@ -6,119 +6,110 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-namespace {
+typedef compiler::CodeAssemblerState CodeAssemblerState;
-void Generate_GeneratorPrototypeResume(
- CodeStubAssembler* assembler, JSGeneratorObject::ResumeMode resume_mode,
- char const* const method_name) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+class GeneratorBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit GeneratorBuiltinsAssembler(CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
- Node* receiver = assembler->Parameter(0);
- Node* value = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* closed =
- assembler->SmiConstant(Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+ protected:
+ void GeneratorPrototypeResume(JSGeneratorObject::ResumeMode resume_mode,
+ char const* const method_name);
+};
+
+void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
+ JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) {
+ Node* receiver = Parameter(0);
+ Node* value = Parameter(1);
+ Node* context = Parameter(4);
+ Node* closed = SmiConstant(JSGeneratorObject::kGeneratorClosed);
// Check if the {receiver} is actually a JSGeneratorObject.
- Label if_receiverisincompatible(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->TaggedIsSmi(receiver),
- &if_receiverisincompatible);
- Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
- assembler->GotoUnless(assembler->Word32Equal(
- receiver_instance_type,
- assembler->Int32Constant(JS_GENERATOR_OBJECT_TYPE)),
- &if_receiverisincompatible);
+ Label if_receiverisincompatible(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(receiver), &if_receiverisincompatible);
+ Node* receiver_instance_type = LoadInstanceType(receiver);
+ GotoIfNot(Word32Equal(receiver_instance_type,
+ Int32Constant(JS_GENERATOR_OBJECT_TYPE)),
+ &if_receiverisincompatible);
// Check if the {receiver} is running or already closed.
- Node* receiver_continuation = assembler->LoadObjectField(
- receiver, JSGeneratorObject::kContinuationOffset);
- Label if_receiverisclosed(assembler, Label::kDeferred),
- if_receiverisrunning(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->SmiEqual(receiver_continuation, closed),
- &if_receiverisclosed);
+ Node* receiver_continuation =
+ LoadObjectField(receiver, JSGeneratorObject::kContinuationOffset);
+ Label if_receiverisclosed(this, Label::kDeferred),
+ if_receiverisrunning(this, Label::kDeferred);
+ GotoIf(SmiEqual(receiver_continuation, closed), &if_receiverisclosed);
DCHECK_LT(JSGeneratorObject::kGeneratorExecuting,
JSGeneratorObject::kGeneratorClosed);
- assembler->GotoIf(assembler->SmiLessThan(receiver_continuation, closed),
- &if_receiverisrunning);
+ GotoIf(SmiLessThan(receiver_continuation, closed), &if_receiverisrunning);
// Resume the {receiver} using our trampoline.
- Node* result = assembler->CallStub(
- CodeFactory::ResumeGenerator(assembler->isolate()), context, value,
- receiver, assembler->SmiConstant(Smi::FromInt(resume_mode)));
- assembler->Return(result);
+ Node* result = CallStub(CodeFactory::ResumeGenerator(isolate()), context,
+ value, receiver, SmiConstant(resume_mode));
+ Return(result);
- assembler->Bind(&if_receiverisincompatible);
+ Bind(&if_receiverisincompatible);
{
// The {receiver} is not a valid JSGeneratorObject.
- Node* result = assembler->CallRuntime(
- Runtime::kThrowIncompatibleMethodReceiver, context,
- assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
- method_name, TENURED)),
- receiver);
- assembler->Return(result); // Never reached.
+ CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+ HeapConstant(
+ factory()->NewStringFromAsciiChecked(method_name, TENURED)),
+ receiver);
+ Unreachable();
}
- assembler->Bind(&if_receiverisclosed);
+ Bind(&if_receiverisclosed);
{
Callable create_iter_result_object =
- CodeFactory::CreateIterResultObject(assembler->isolate());
+ CodeFactory::CreateIterResultObject(isolate());
// The {receiver} is closed already.
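+    // Informally: on a closed generator, next() produces
+    // { value: undefined, done: true }, return(v) produces
+    // { value: v, done: true }, and throw(e) rethrows e.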
Node* result = nullptr;
switch (resume_mode) {
case JSGeneratorObject::kNext:
- result = assembler->CallStub(create_iter_result_object, context,
- assembler->UndefinedConstant(),
- assembler->TrueConstant());
+ result = CallStub(create_iter_result_object, context,
+ UndefinedConstant(), TrueConstant());
break;
case JSGeneratorObject::kReturn:
- result = assembler->CallStub(create_iter_result_object, context, value,
- assembler->TrueConstant());
+ result =
+ CallStub(create_iter_result_object, context, value, TrueConstant());
break;
case JSGeneratorObject::kThrow:
- result = assembler->CallRuntime(Runtime::kThrow, context, value);
+ result = CallRuntime(Runtime::kThrow, context, value);
break;
}
- assembler->Return(result);
+ Return(result);
}
- assembler->Bind(&if_receiverisrunning);
+ Bind(&if_receiverisrunning);
{
- Node* result =
- assembler->CallRuntime(Runtime::kThrowGeneratorRunning, context);
- assembler->Return(result); // Never reached.
+ CallRuntime(Runtime::kThrowGeneratorRunning, context);
+ Unreachable();
}
}
-} // anonymous namespace
-
// ES6 section 25.3.1.2 Generator.prototype.next ( value )
-void Builtins::Generate_GeneratorPrototypeNext(
- compiler::CodeAssemblerState* state) {
- CodeStubAssembler assembler(state);
- Generate_GeneratorPrototypeResume(&assembler, JSGeneratorObject::kNext,
- "[Generator].prototype.next");
+TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
+ GeneratorPrototypeResume(JSGeneratorObject::kNext,
+ "[Generator].prototype.next");
}
// ES6 section 25.3.1.3 Generator.prototype.return ( value )
-void Builtins::Generate_GeneratorPrototypeReturn(
- compiler::CodeAssemblerState* state) {
- CodeStubAssembler assembler(state);
- Generate_GeneratorPrototypeResume(&assembler, JSGeneratorObject::kReturn,
- "[Generator].prototype.return");
+TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
+ GeneratorPrototypeResume(JSGeneratorObject::kReturn,
+ "[Generator].prototype.return");
}
// ES6 section 25.3.1.4 Generator.prototype.throw ( exception )
-void Builtins::Generate_GeneratorPrototypeThrow(
- compiler::CodeAssemblerState* state) {
- CodeStubAssembler assembler(state);
- Generate_GeneratorPrototypeResume(&assembler, JSGeneratorObject::kThrow,
- "[Generator].prototype.throw");
+TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
+ GeneratorPrototypeResume(JSGeneratorObject::kThrow,
+ "[Generator].prototype.throw");
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
index 6c97a0bbad..2af6e99730 100644
--- a/deps/v8/src/builtins/builtins-global.cc
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -7,6 +7,8 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/compiler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
#include "src/uri.h"
namespace v8 {
@@ -92,9 +94,10 @@ BUILTIN(GlobalEval) {
}
Handle<JSFunction> function;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, function, Compiler::GetFunctionFromString(
- handle(target->native_context(), isolate),
- Handle<String>::cast(x), NO_PARSE_RESTRICTION));
+ isolate, function,
+ Compiler::GetFunctionFromString(handle(target->native_context(), isolate),
+ Handle<String>::cast(x),
+ NO_PARSE_RESTRICTION, kNoSourcePosition));
RETURN_RESULT_OR_FAILURE(
isolate,
Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
diff --git a/deps/v8/src/builtins/builtins-handler.cc b/deps/v8/src/builtins/builtins-handler.cc
index 42b35d0d2f..766d43757c 100644
--- a/deps/v8/src/builtins/builtins-handler.cc
+++ b/deps/v8/src/builtins/builtins-handler.cc
@@ -5,107 +5,166 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
-#include "src/ic/accessor-assembler.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/keyed-store-generic.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-void Builtins::Generate_KeyedLoadIC_Megamorphic_TF(
- compiler::CodeAssemblerState* state) {
- AccessorAssembler::GenerateKeyedLoadICMegamorphic(state);
+TF_BUILTIN(KeyedLoadIC_IndexedString, CodeStubAssembler) {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* index = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label miss(this);
+
+ Node* index_intptr = TryToIntptr(index, &miss);
+ Node* length = SmiUntag(LoadStringLength(receiver));
+ GotoIf(UintPtrGreaterThanOrEqual(index_intptr, length), &miss);
+
+ Node* code = StringCharCodeAt(receiver, index_intptr, INTPTR_PARAMETERS);
+ Node* result = StringFromCharCode(code);
+ Return(result);
+
+ Bind(&miss);
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, index, slot,
+ vector);
}
-void Builtins::Generate_KeyedLoadIC_Miss(compiler::CodeAssemblerState* state) {
- typedef compiler::Node Node;
+TF_BUILTIN(KeyedLoadIC_Miss, CodeStubAssembler) {
typedef LoadWithVectorDescriptor Descriptor;
- CodeStubAssembler assembler(state);
- Node* receiver = assembler.Parameter(Descriptor::kReceiver);
- Node* name = assembler.Parameter(Descriptor::kName);
- Node* slot = assembler.Parameter(Descriptor::kSlot);
- Node* vector = assembler.Parameter(Descriptor::kVector);
- Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
- assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, name,
- slot, vector);
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, name, slot,
+ vector);
}
-void Builtins::Generate_KeyedLoadIC_Slow(compiler::CodeAssemblerState* state) {
- typedef compiler::Node Node;
+TF_BUILTIN(KeyedLoadIC_Slow, CodeStubAssembler) {
typedef LoadWithVectorDescriptor Descriptor;
- CodeStubAssembler assembler(state);
- Node* receiver = assembler.Parameter(Descriptor::kReceiver);
- Node* name = assembler.Parameter(Descriptor::kName);
- Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* context = Parameter(Descriptor::kContext);
- assembler.TailCallRuntime(Runtime::kKeyedGetProperty, context, receiver,
- name);
+ TailCallRuntime(Runtime::kKeyedGetProperty, context, receiver, name);
}
-void Builtins::Generate_KeyedStoreIC_Megamorphic_TF(
+void Builtins::Generate_KeyedStoreIC_Megamorphic(
compiler::CodeAssemblerState* state) {
KeyedStoreGenericGenerator::Generate(state, SLOPPY);
}
-void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict_TF(
+void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict(
compiler::CodeAssemblerState* state) {
KeyedStoreGenericGenerator::Generate(state, STRICT);
}
-void Builtins::Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm);
+TF_BUILTIN(KeyedStoreIC_Miss, CodeStubAssembler) {
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector,
+ receiver, name);
}
-void Builtins::Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
- KeyedStoreIC::GenerateSlow(masm);
+TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) {
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, context, value, slot, vector,
+ receiver, name);
}
-void Builtins::Generate_LoadGlobalIC_Miss(compiler::CodeAssemblerState* state) {
- typedef compiler::Node Node;
+TF_BUILTIN(LoadGlobalIC_Miss, CodeStubAssembler) {
typedef LoadGlobalWithVectorDescriptor Descriptor;
- CodeStubAssembler assembler(state);
- Node* name = assembler.Parameter(Descriptor::kName);
- Node* slot = assembler.Parameter(Descriptor::kSlot);
- Node* vector = assembler.Parameter(Descriptor::kVector);
- Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
- assembler.TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name, slot,
- vector);
+ TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name, slot, vector);
}
-void Builtins::Generate_LoadGlobalIC_Slow(compiler::CodeAssemblerState* state) {
- typedef compiler::Node Node;
+TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) {
typedef LoadGlobalWithVectorDescriptor Descriptor;
- CodeStubAssembler assembler(state);
- Node* name = assembler.Parameter(Descriptor::kName);
- Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
- assembler.TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, name);
+ TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, name, slot, vector);
}
void Builtins::Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
}
-void Builtins::Generate_LoadIC_Miss(compiler::CodeAssemblerState* state) {
- typedef compiler::Node Node;
+TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label miss(this);
+
+ Node* proto_or_map =
+ LoadObjectField(receiver, JSFunction::kPrototypeOrInitialMapOffset);
+ GotoIf(IsTheHole(proto_or_map), &miss);
+
+ Variable var_result(this, MachineRepresentation::kTagged, proto_or_map);
+ Label done(this, &var_result);
+ GotoIfNot(IsMap(proto_or_map), &done);
+
+ var_result.Bind(LoadMapPrototype(proto_or_map));
+ Goto(&done);
+
+ Bind(&done);
+ Return(var_result.value());
+
+ Bind(&miss);
+ TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
+}
+
+TF_BUILTIN(LoadIC_Miss, CodeStubAssembler) {
typedef LoadWithVectorDescriptor Descriptor;
- CodeStubAssembler assembler(state);
- Node* receiver = assembler.Parameter(Descriptor::kReceiver);
- Node* name = assembler.Parameter(Descriptor::kName);
- Node* slot = assembler.Parameter(Descriptor::kSlot);
- Node* vector = assembler.Parameter(Descriptor::kVector);
- Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
- assembler.TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
- slot, vector);
+ TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
}
TF_BUILTIN(LoadIC_Normal, CodeStubAssembler) {
@@ -138,32 +197,28 @@ TF_BUILTIN(LoadIC_Normal, CodeStubAssembler) {
TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
}
-void Builtins::Generate_LoadIC_Slow(compiler::CodeAssemblerState* state) {
- typedef compiler::Node Node;
+TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) {
typedef LoadWithVectorDescriptor Descriptor;
- CodeStubAssembler assembler(state);
- Node* receiver = assembler.Parameter(Descriptor::kReceiver);
- Node* name = assembler.Parameter(Descriptor::kName);
- Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* context = Parameter(Descriptor::kContext);
- assembler.TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
+ TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
}
-void Builtins::Generate_StoreIC_Miss(compiler::CodeAssemblerState* state) {
- typedef compiler::Node Node;
+TF_BUILTIN(StoreIC_Miss, CodeStubAssembler) {
typedef StoreWithVectorDescriptor Descriptor;
- CodeStubAssembler assembler(state);
- Node* receiver = assembler.Parameter(Descriptor::kReceiver);
- Node* name = assembler.Parameter(Descriptor::kName);
- Node* value = assembler.Parameter(Descriptor::kValue);
- Node* slot = assembler.Parameter(Descriptor::kSlot);
- Node* vector = assembler.Parameter(Descriptor::kVector);
- Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
- assembler.TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
- vector, receiver, name);
+ TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector,
+ receiver, name);
}
TF_BUILTIN(StoreIC_Normal, CodeStubAssembler) {
@@ -185,21 +240,15 @@ TF_BUILTIN(StoreIC_Normal, CodeStubAssembler) {
&var_name_index, &slow);
Bind(&found);
{
- const int kNameToDetailsOffset = (NameDictionary::kEntryDetailsIndex -
- NameDictionary::kEntryKeyIndex) *
- kPointerSize;
- Node* details = LoadFixedArrayElement(properties, var_name_index.value(),
- kNameToDetailsOffset);
+ Node* details = LoadDetailsByKeyIndex<NameDictionary>(
+ properties, var_name_index.value());
// Check that the property is a writable data property (no accessor).
const int kTypeAndReadOnlyMask = PropertyDetails::KindField::kMask |
PropertyDetails::kAttributesReadOnlyMask;
STATIC_ASSERT(kData == 0);
- GotoIf(IsSetSmi(details, kTypeAndReadOnlyMask), &slow);
- const int kNameToValueOffset =
- (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
- kPointerSize;
- StoreFixedArrayElement(properties, var_name_index.value(), value,
- UPDATE_WRITE_BARRIER, kNameToValueOffset);
+ GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), &slow);
+ StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
+ value);
Return(value);
}
}
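The LoadDetailsByKeyIndex/StoreValueByKeyIndex helpers fold away the entry-offset arithmetic that the deleted lines spelled out by hand; the guard also switches from IsSetSmi to IsSetWord32 because the details word now arrives untagged. For reference, the arithmetic being centralized (taken verbatim from the deleted lines) is:

    // Offsets from a NameDictionary entry's key slot to its details and
    // value slots:
    const int kNameToDetailsOffset =
        (NameDictionary::kEntryDetailsIndex - NameDictionary::kEntryKeyIndex) *
        kPointerSize;
    const int kNameToValueOffset =
        (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
        kPointerSize;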
@@ -213,35 +262,5 @@ void Builtins::Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
}
-namespace {
-void Generate_StoreIC_Slow(compiler::CodeAssemblerState* state,
- LanguageMode language_mode) {
- typedef compiler::Node Node;
- typedef StoreWithVectorDescriptor Descriptor;
- CodeStubAssembler assembler(state);
-
- Node* receiver = assembler.Parameter(Descriptor::kReceiver);
- Node* name = assembler.Parameter(Descriptor::kName);
- Node* value = assembler.Parameter(Descriptor::kValue);
- Node* context = assembler.Parameter(Descriptor::kContext);
- Node* lang_mode = assembler.SmiConstant(Smi::FromInt(language_mode));
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- assembler.TailCallRuntime(Runtime::kSetProperty, context, receiver, name,
- value, lang_mode);
-}
-} // anonymous namespace
-
-void Builtins::Generate_StoreIC_SlowSloppy(
- compiler::CodeAssemblerState* state) {
- Generate_StoreIC_Slow(state, SLOPPY);
-}
-
-void Builtins::Generate_StoreIC_SlowStrict(
- compiler::CodeAssemblerState* state) {
- Generate_StoreIC_Slow(state, STRICT);
-}
-
} // namespace internal
} // namespace v8
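KeyedLoadIC_IndexedString, new in this file, gives indexed loads on string receivers a dedicated fast path before falling back to the generic miss handler. As ordinary C++ over stand-in types (illustrative only, not V8 API), the computation is:

    #include <cstdint>
    #include <optional>
    #include <string>

    // nullopt stands in for the miss path, which in the builtin tail-calls
    // Runtime::kKeyedLoadIC_Miss with the slot and feedback vector.
    std::optional<std::string> IndexedStringLoad(const std::string& receiver,
                                                 uint64_t index) {
      if (index >= receiver.size()) return std::nullopt;  // bounds check
      return std::string(1, receiver[index]);  // CharCodeAt + FromCharCode
    }

The unsigned comparison in the builtin (UintPtrGreaterThanOrEqual) folds the negative-index case into the same miss branch.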
diff --git a/deps/v8/src/builtins/builtins-ic.cc b/deps/v8/src/builtins/builtins-ic.cc
index 398d512dcf..e11afbe3b1 100644
--- a/deps/v8/src/builtins/builtins-ic.cc
+++ b/deps/v8/src/builtins/builtins-ic.cc
@@ -4,75 +4,48 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
#include "src/ic/accessor-assembler.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-TF_BUILTIN(LoadIC, CodeStubAssembler) {
- AccessorAssembler::GenerateLoadIC(state());
-}
-
-TF_BUILTIN(KeyedLoadIC, CodeStubAssembler) {
- AccessorAssembler::GenerateKeyedLoadICTF(state());
-}
-
-TF_BUILTIN(LoadICTrampoline, CodeStubAssembler) {
- AccessorAssembler::GenerateLoadICTrampoline(state());
-}
-
-TF_BUILTIN(KeyedLoadICTrampoline, CodeStubAssembler) {
- AccessorAssembler::GenerateKeyedLoadICTrampolineTF(state());
-}
-
-TF_BUILTIN(StoreIC, CodeStubAssembler) {
- AccessorAssembler::GenerateStoreIC(state());
-}
-
-TF_BUILTIN(StoreICTrampoline, CodeStubAssembler) {
- AccessorAssembler::GenerateStoreICTrampoline(state());
-}
-
-TF_BUILTIN(StoreICStrict, CodeStubAssembler) {
- AccessorAssembler::GenerateStoreIC(state());
-}
-
-TF_BUILTIN(StoreICStrictTrampoline, CodeStubAssembler) {
- AccessorAssembler::GenerateStoreICTrampoline(state());
-}
-
-TF_BUILTIN(KeyedStoreIC, CodeStubAssembler) {
- AccessorAssembler::GenerateKeyedStoreICTF(state(), SLOPPY);
-}
-
-TF_BUILTIN(KeyedStoreICTrampoline, CodeStubAssembler) {
- AccessorAssembler::GenerateKeyedStoreICTrampolineTF(state(), SLOPPY);
-}
-
-TF_BUILTIN(KeyedStoreICStrict, CodeStubAssembler) {
- AccessorAssembler::GenerateKeyedStoreICTF(state(), STRICT);
-}
-
-TF_BUILTIN(KeyedStoreICStrictTrampoline, CodeStubAssembler) {
- AccessorAssembler::GenerateKeyedStoreICTrampolineTF(state(), STRICT);
-}
-
-TF_BUILTIN(LoadGlobalIC, CodeStubAssembler) {
- AccessorAssembler::GenerateLoadGlobalIC(state(), NOT_INSIDE_TYPEOF);
-}
-
-TF_BUILTIN(LoadGlobalICInsideTypeof, CodeStubAssembler) {
- AccessorAssembler::GenerateLoadGlobalIC(state(), INSIDE_TYPEOF);
-}
-
-TF_BUILTIN(LoadGlobalICTrampoline, CodeStubAssembler) {
- AccessorAssembler::GenerateLoadGlobalICTrampoline(state(), NOT_INSIDE_TYPEOF);
-}
-
-TF_BUILTIN(LoadGlobalICInsideTypeofTrampoline, CodeStubAssembler) {
- AccessorAssembler::GenerateLoadGlobalICTrampoline(state(), INSIDE_TYPEOF);
-}
+#define IC_BUILTIN(Name) \
+ void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
+ AccessorAssembler assembler(state); \
+ assembler.Generate##Name(); \
+ }
+
+#define IC_BUILTIN_PARAM(BuiltinName, GeneratorName, parameter) \
+ void Builtins::Generate_##BuiltinName(compiler::CodeAssemblerState* state) { \
+ AccessorAssembler assembler(state); \
+ assembler.Generate##GeneratorName(parameter); \
+ }
+
+IC_BUILTIN(LoadIC)
+IC_BUILTIN(KeyedLoadIC)
+IC_BUILTIN(LoadICTrampoline)
+IC_BUILTIN(LoadField)
+IC_BUILTIN(KeyedLoadICTrampoline)
+IC_BUILTIN(KeyedLoadIC_Megamorphic)
+IC_BUILTIN(StoreIC)
+IC_BUILTIN(StoreICTrampoline)
+
+IC_BUILTIN_PARAM(StoreICStrict, StoreIC, /* no param */)
+IC_BUILTIN_PARAM(StoreICStrictTrampoline, StoreICTrampoline, /* no param */)
+
+IC_BUILTIN_PARAM(KeyedStoreIC, KeyedStoreIC, SLOPPY)
+IC_BUILTIN_PARAM(KeyedStoreICTrampoline, KeyedStoreICTrampoline, SLOPPY)
+IC_BUILTIN_PARAM(KeyedStoreICStrict, KeyedStoreIC, STRICT)
+IC_BUILTIN_PARAM(KeyedStoreICStrictTrampoline, KeyedStoreICTrampoline, STRICT)
+IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline,
+ NOT_INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofTrampoline, LoadGlobalICTrampoline,
+ INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LoadICProtoArray, LoadICProtoArray, false)
+IC_BUILTIN_PARAM(LoadICProtoArrayThrowIfNonexistent, LoadICProtoArray, true)
} // namespace internal
} // namespace v8
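Sixteen hand-written trampoline bodies collapse into the two macros above. By direct substitution, IC_BUILTIN(LoadIC) and IC_BUILTIN_PARAM(KeyedStoreICStrict, KeyedStoreIC, STRICT) expand to:

    void Builtins::Generate_LoadIC(compiler::CodeAssemblerState* state) {
      AccessorAssembler assembler(state);
      assembler.GenerateLoadIC();
    }

    void Builtins::Generate_KeyedStoreICStrict(
        compiler::CodeAssemblerState* state) {
      AccessorAssembler assembler(state);
      assembler.GenerateKeyedStoreIC(STRICT);
    }

The /* no param */ comments in the StoreICStrict rows are stripped before macro expansion, so Generate##GeneratorName(parameter) expands to a call with an empty argument list and the strict and sloppy store ICs share one generator.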
diff --git a/deps/v8/src/builtins/builtins-internal.cc b/deps/v8/src/builtins/builtins-internal.cc
index f94ed0c16f..b1c737ba48 100644
--- a/deps/v8/src/builtins/builtins-internal.cc
+++ b/deps/v8/src/builtins/builtins-internal.cc
@@ -5,8 +5,10 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
+#include "src/counters.h"
#include "src/interface-descriptors.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -247,11 +249,9 @@ void Builtins::Generate_NewUnmappedArgumentsElements(
assembler.Load(MachineType::AnyTagged(), parent_frame,
assembler.IntPtrConstant(
CommonFrameConstants::kContextOrFrameTypeOffset));
- assembler.GotoUnless(
- assembler.WordEqual(
- parent_frame_type,
- assembler.SmiConstant(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))),
- &done);
+ assembler.GotoIfNot(assembler.MarkerIsFrameType(
+ parent_frame_type, StackFrame::ARGUMENTS_ADAPTOR),
+ &done);
{
// Determine the length from the ArgumentsAdaptorFrame.
Node* length = assembler.LoadAndUntagSmi(
@@ -289,9 +289,8 @@ void Builtins::Generate_NewRestParameterElements(
assembler.Load(MachineType::AnyTagged(), frame,
assembler.IntPtrConstant(
CommonFrameConstants::kContextOrFrameTypeOffset));
- assembler.GotoUnless(
- assembler.WordEqual(frame_type, assembler.SmiConstant(Smi::FromInt(
- StackFrame::ARGUMENTS_ADAPTOR))),
+ assembler.GotoIfNot(
+ assembler.MarkerIsFrameType(frame_type, StackFrame::ARGUMENTS_ADAPTOR),
&if_empty);
// Determine the length from the ArgumentsAdaptorFrame.
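Both hunks replace an open-coded comparison against a Smi-encoded frame marker with the new MarkerIsFrameType predicate, so the marker encoding lives behind a single helper. A minimal sketch, assuming the helper is a straight refactor of the deleted expression (the TypeToMarker spelling is an assumption; the point is that callers no longer hard-code the encoding):

    Node* CodeStubAssembler::MarkerIsFrameType(Node* marker_or_function,
                                               StackFrame::Type frame_type) {
      // Behaviourally equivalent to the deleted
      // WordEqual(marker, SmiConstant(Smi::FromInt(frame_type))) checks.
      return WordEqual(marker_or_function,
                       IntPtrConstant(StackFrame::TypeToMarker(frame_type)));
    }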
diff --git a/deps/v8/src/builtins/builtins-interpreter.cc b/deps/v8/src/builtins/builtins-interpreter.cc
index 16091848c5..3cfa57bcbe 100644
--- a/deps/v8/src/builtins/builtins-interpreter.cc
+++ b/deps/v8/src/builtins/builtins-interpreter.cc
@@ -2,24 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-Handle<Code> Builtins::InterpreterPushArgsAndCall(TailCallMode tail_call_mode,
- CallableType function_type) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- if (function_type == CallableType::kJSFunction) {
+Handle<Code> Builtins::InterpreterPushArgsAndCall(
+ TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ switch (mode) {
+ case InterpreterPushArgsMode::kJSFunction:
+ if (tail_call_mode == TailCallMode::kDisallow) {
return InterpreterPushArgsAndCallFunction();
} else {
- return InterpreterPushArgsAndCall();
- }
- case TailCallMode::kAllow:
- if (function_type == CallableType::kJSFunction) {
return InterpreterPushArgsAndTailCallFunction();
+ }
+ case InterpreterPushArgsMode::kWithFinalSpread:
+ CHECK(tail_call_mode == TailCallMode::kDisallow);
+ return InterpreterPushArgsAndCallWithFinalSpread();
+ case InterpreterPushArgsMode::kOther:
+ if (tail_call_mode == TailCallMode::kDisallow) {
+ return InterpreterPushArgsAndCall();
} else {
return InterpreterPushArgsAndTailCall();
}
@@ -29,33 +33,41 @@ Handle<Code> Builtins::InterpreterPushArgsAndCall(TailCallMode tail_call_mode,
}
void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
- return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kDisallow,
- CallableType::kAny);
+ return Generate_InterpreterPushArgsAndCallImpl(
+ masm, TailCallMode::kDisallow, InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushArgsAndCallFunction(
MacroAssembler* masm) {
- return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kDisallow,
- CallableType::kJSFunction);
+ return Generate_InterpreterPushArgsAndCallImpl(
+ masm, TailCallMode::kDisallow, InterpreterPushArgsMode::kJSFunction);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndCallWithFinalSpread(
+ MacroAssembler* masm) {
+ return Generate_InterpreterPushArgsAndCallImpl(
+ masm, TailCallMode::kDisallow, InterpreterPushArgsMode::kWithFinalSpread);
}
void Builtins::Generate_InterpreterPushArgsAndTailCall(MacroAssembler* masm) {
- return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow,
- CallableType::kAny);
+ return Generate_InterpreterPushArgsAndCallImpl(
+ masm, TailCallMode::kAllow, InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushArgsAndTailCallFunction(
MacroAssembler* masm) {
- return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow,
- CallableType::kJSFunction);
+ return Generate_InterpreterPushArgsAndCallImpl(
+ masm, TailCallMode::kAllow, InterpreterPushArgsMode::kJSFunction);
}
Handle<Code> Builtins::InterpreterPushArgsAndConstruct(
- CallableType function_type) {
- switch (function_type) {
- case CallableType::kJSFunction:
+ InterpreterPushArgsMode mode) {
+ switch (mode) {
+ case InterpreterPushArgsMode::kJSFunction:
return InterpreterPushArgsAndConstructFunction();
- case CallableType::kAny:
+ case InterpreterPushArgsMode::kWithFinalSpread:
+ return InterpreterPushArgsAndConstructWithFinalSpread();
+ case InterpreterPushArgsMode::kOther:
return InterpreterPushArgsAndConstruct();
}
UNREACHABLE();
@@ -63,13 +75,20 @@ Handle<Code> Builtins::InterpreterPushArgsAndConstruct(
}
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
- return Generate_InterpreterPushArgsAndConstructImpl(masm, CallableType::kAny);
+ return Generate_InterpreterPushArgsAndConstructImpl(
+ masm, InterpreterPushArgsMode::kOther);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndConstructWithFinalSpread(
+ MacroAssembler* masm) {
+ return Generate_InterpreterPushArgsAndConstructImpl(
+ masm, InterpreterPushArgsMode::kWithFinalSpread);
}
void Builtins::Generate_InterpreterPushArgsAndConstructFunction(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsAndConstructImpl(
- masm, CallableType::kJSFunction);
+ masm, InterpreterPushArgsMode::kJSFunction);
}
} // namespace internal
} // namespace v8
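The two-axis dispatch on TailCallMode and CallableType becomes a single InterpreterPushArgsMode, with tail-call-ness kept as a separate argument; spread calls are new and, per the CHECK above, only legal without tail calls. The enum as this file uses it (assumed to match its definition in src/globals.h at this revision):

    enum class InterpreterPushArgsMode : unsigned {
      kJSFunction,       // callee statically known to be a JSFunction
      kWithFinalSpread,  // last argument is a spread; tail calls disallowed
      kOther             // any other callable
    };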
diff --git a/deps/v8/src/builtins/builtins-json.cc b/deps/v8/src/builtins/builtins-json.cc
index 4a8c7c5ea8..7bc6ab0b06 100644
--- a/deps/v8/src/builtins/builtins-json.cc
+++ b/deps/v8/src/builtins/builtins-json.cc
@@ -5,8 +5,10 @@
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/counters.h"
#include "src/json-parser.h"
#include "src/json-stringifier.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
index 1305e73db0..f5249138c7 100644
--- a/deps/v8/src/builtins/builtins-math.cc
+++ b/deps/v8/src/builtins/builtins-math.cc
@@ -6,6 +6,8 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 7e750139de..90f54efc69 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -6,6 +6,9 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -60,7 +63,7 @@ TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
// Check if {number} contains a finite, non-NaN value.
Node* number_value = LoadHeapNumberValue(number);
@@ -84,7 +87,7 @@ TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
// Load the actual value of {number}.
Node* number_value = LoadHeapNumberValue(number);
@@ -113,7 +116,7 @@ TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_false);
// Check if {number} is a HeapNumber.
- GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
// Check if {number} contains a NaN value.
Node* number_value = LoadHeapNumberValue(number);
@@ -136,7 +139,7 @@ TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
// Load the actual value of {number}.
Node* number_value = LoadHeapNumberValue(number);
@@ -145,7 +148,7 @@ TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
Node* integer = Float64Trunc(number_value);
// Check if {number}'s value matches the integer (ruling out the infinities).
- GotoUnless(
+ GotoIfNot(
Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0)),
&return_false);
@@ -299,8 +302,8 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
// Check if the absolute {input} value is in the ]0.01,1e9[ range.
Node* input_value_abs = Float64Abs(input_value);
- GotoUnless(Float64LessThan(input_value_abs, Float64Constant(1e9)),
- &if_generic);
+ GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1e9)),
+ &if_generic);
Branch(Float64LessThan(Float64Constant(0.01), input_value_abs),
&if_inputissigned32, &if_generic);
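GotoUnless becomes GotoIfNot here and throughout the rest of this update (see builtins-object.cc and builtins-promise.cc below); it is a pure rename, still meaning "jump when the condition is false". A minimal sketch of the helper, assuming the usual implementation in terms of Branch:

    void CodeAssembler::GotoIfNot(Node* condition, Label* false_label) {
      Label true_label(this);
      Branch(condition, &true_label, false_label);  // fall through when true
      Bind(&true_label);
    }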
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 74e0a20832..af5a42aa64 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -2,25 +2,41 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-object.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/keys.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
#include "src/property-descriptor.h"
namespace v8 {
namespace internal {
-class ObjectBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+typedef compiler::Node Node;
- protected:
- void IsString(Node* object, Label* if_string, Label* if_notstring);
- void ReturnToStringFormat(Node* context, Node* string);
-};
+std::tuple<Node*, Node*, Node*> ObjectBuiltinsAssembler::EmitForInPrepare(
+ Node* object, Node* context, Label* call_runtime,
+ Label* nothing_to_iterate) {
+ Label use_cache(this);
+ CSA_ASSERT(this, IsJSReceiver(object));
+ CheckEnumCache(object, &use_cache, call_runtime);
+ Bind(&use_cache);
+ Node* map = LoadMap(object);
+ Node* enum_length = EnumLength(map);
+ GotoIf(WordEqual(enum_length, SmiConstant(0)), nothing_to_iterate);
+ Node* descriptors = LoadMapDescriptors(map);
+ Node* cache_offset =
+ LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
+ Node* enum_cache = LoadObjectField(
+ cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
+
+ return std::make_tuple(map, enum_cache, enum_length);
+}
// -----------------------------------------------------------------------------
// ES6 section 19.1 Object Objects
@@ -39,21 +55,24 @@ TF_BUILTIN(ObjectHasOwnProperty, ObjectBuiltinsAssembler) {
Node* map = LoadMap(object);
Node* instance_type = LoadMapInstanceType(map);
- Variable var_index(this, MachineType::PointerRepresentation());
-
- Label keyisindex(this), if_iskeyunique(this);
- TryToName(key, &keyisindex, &var_index, &if_iskeyunique, &call_runtime);
-
- Bind(&if_iskeyunique);
- TryHasOwnProperty(object, map, instance_type, key, &return_true,
- &return_false, &call_runtime);
-
- Bind(&keyisindex);
- // Handle negative keys in the runtime.
- GotoIf(IntPtrLessThan(var_index.value(), IntPtrConstant(0)), &call_runtime);
- TryLookupElement(object, map, instance_type, var_index.value(), &return_true,
- &return_false, &call_runtime);
-
+ {
+ Variable var_index(this, MachineType::PointerRepresentation());
+ Variable var_unique(this, MachineRepresentation::kTagged);
+
+ Label keyisindex(this), if_iskeyunique(this);
+ TryToName(key, &keyisindex, &var_index, &if_iskeyunique, &var_unique,
+ &call_runtime);
+
+ Bind(&if_iskeyunique);
+ TryHasOwnProperty(object, map, instance_type, var_unique.value(),
+ &return_true, &return_false, &call_runtime);
+
+ Bind(&keyisindex);
+ // Handle negative keys in the runtime.
+ GotoIf(IntPtrLessThan(var_index.value(), IntPtrConstant(0)), &call_runtime);
+ TryLookupElement(object, map, instance_type, var_index.value(),
+ &return_true, &return_false, &call_runtime);
+ }
Bind(&return_true);
Return(BooleanConstant(true));
@@ -80,9 +99,8 @@ BUILTIN(ObjectAssign) {
// 4. For each element nextSource of sources, in ascending index order,
for (int i = 2; i < args.length(); ++i) {
Handle<Object> next_source = args.at(i);
- MAYBE_RETURN(
- JSReceiver::SetOrCopyDataProperties(isolate, to, next_source, true),
- isolate->heap()->exception());
+ MAYBE_RETURN(JSReceiver::SetOrCopyDataProperties(isolate, to, next_source),
+ isolate->heap()->exception());
}
// 5. Return to.
return *to;
@@ -315,9 +333,9 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
Node* properties_map = LoadMap(properties);
GotoIf(IsSpecialReceiverMap(properties_map), &call_runtime);
// Stay on the fast path only if there are no elements.
- GotoUnless(WordEqual(LoadElements(properties),
- LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
- &call_runtime);
+ GotoIfNot(WordEqual(LoadElements(properties),
+ LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
+ &call_runtime);
// Handle dictionary objects or fast objects with properties in runtime.
Node* bit_field3 = LoadMapBitField3(properties_map);
GotoIf(IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
@@ -908,6 +926,49 @@ TF_BUILTIN(ForInFilter, ObjectBuiltinsAssembler) {
Return(ForInFilter(key, object, context));
}
+TF_BUILTIN(ForInNext, ObjectBuiltinsAssembler) {
+ typedef ForInNextDescriptor Descriptor;
+
+ Label filter(this);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* cache_array = Parameter(Descriptor::kCacheArray);
+ Node* cache_type = Parameter(Descriptor::kCacheType);
+ Node* index = Parameter(Descriptor::kIndex);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Node* key = LoadFixedArrayElement(cache_array, SmiUntag(index));
+ Node* map = LoadMap(object);
+ GotoIfNot(WordEqual(map, cache_type), &filter);
+ Return(key);
+ Bind(&filter);
+ Return(ForInFilter(key, object, context));
+}
+
+TF_BUILTIN(ForInPrepare, ObjectBuiltinsAssembler) {
+ typedef ForInPrepareDescriptor Descriptor;
+
+ Label call_runtime(this), nothing_to_iterate(this);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Node* cache_type;
+ Node* cache_array;
+ Node* cache_length;
+ std::tie(cache_type, cache_array, cache_length) =
+ EmitForInPrepare(object, context, &call_runtime, &nothing_to_iterate);
+
+ Return(cache_type, cache_array, cache_length);
+
+ Bind(&call_runtime);
+ TailCallRuntime(Runtime::kForInPrepare, context, object);
+
+ Bind(&nothing_to_iterate);
+ {
+ Node* zero = SmiConstant(0);
+ Return(zero, zero, zero);
+ }
+}
+
TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) {
typedef CompareDescriptor Descriptor;
diff --git a/deps/v8/src/builtins/builtins-object.h b/deps/v8/src/builtins/builtins-object.h
new file mode 100644
index 0000000000..49434268c0
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-object.h
@@ -0,0 +1,26 @@
+
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class ObjectBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ std::tuple<Node*, Node*, Node*> EmitForInPrepare(Node* object, Node* context,
+ Label* call_runtime,
+ Label* nothing_to_iterate);
+
+ protected:
+ void IsString(Node* object, Label* if_string, Label* if_notstring);
+ void ReturnToStringFormat(Node* context, Node* string);
+};
+
+} // namespace internal
+} // namespace v8
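EmitForInPrepare returns its three cache values as a std::tuple, which the ForInPrepare builtin unpacks with std::tie rather than threading out-parameters. A self-contained illustration of the hand-off (plain C++, not V8 code; the int payloads are placeholders for the Node* triple):

    #include <tuple>

    static std::tuple<int, int, int> PrepareEnumCache() {
      int cache_type = 1, cache_array = 2, cache_length = 3;  // placeholders
      return std::make_tuple(cache_type, cache_array, cache_length);
    }

    int main() {
      int cache_type = 0, cache_array = 0, cache_length = 0;
      std::tie(cache_type, cache_array, cache_length) = PrepareEnumCache();
      return cache_type + cache_array + cache_length;  // 6
    }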
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
index 8a2eab06fc..0d0238d267 100644
--- a/deps/v8/src/builtins/builtins-promise.cc
+++ b/deps/v8/src/builtins/builtins-promise.cc
@@ -8,6 +8,7 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -42,7 +43,7 @@ Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context,
PromiseInit(instance);
Label out(this);
- GotoUnless(IsPromiseHookEnabled(), &out);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
CallRuntime(Runtime::kPromiseHookInit, context, instance, parent);
Goto(&out);
@@ -63,7 +64,7 @@ Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(Node* context,
SmiConstant(0));
Label out(this);
- GotoUnless(IsPromiseHookEnabled(), &out);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
CallRuntime(Runtime::kPromiseHookInit, context, instance,
UndefinedConstant());
Goto(&out);
@@ -134,7 +135,7 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
StoreObjectField(capability, JSPromiseCapability::kResolveOffset, resolve);
StoreObjectField(capability, JSPromiseCapability::kRejectOffset, reject);
- GotoUnless(IsPromiseHookEnabled(), &out);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
CallRuntime(Runtime::kPromiseHookInit, context, promise,
UndefinedConstant());
Goto(&out);
@@ -158,12 +159,12 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
Node* resolve =
LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
GotoIf(TaggedIsSmi(resolve), &if_notcallable);
- GotoUnless(IsCallableMap(LoadMap(resolve)), &if_notcallable);
+ GotoIfNot(IsCallableMap(LoadMap(resolve)), &if_notcallable);
Node* reject =
LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
GotoIf(TaggedIsSmi(reject), &if_notcallable);
- GotoUnless(IsCallableMap(LoadMap(reject)), &if_notcallable);
+ GotoIfNot(IsCallableMap(LoadMap(reject)), &if_notcallable);
StoreObjectField(capability, JSPromiseCapability::kPromiseOffset, promise);
@@ -178,8 +179,7 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
StoreObjectField(capability, JSPromiseCapability::kRejectOffset,
UndefinedConstant());
CallRuntime(Runtime::kThrowTypeError, context, message);
- var_result.Bind(UndefinedConstant());
- Goto(&out);
+ Unreachable();
}
Bind(&out);
@@ -251,8 +251,7 @@ Node* PromiseBuiltinsAssembler::ThrowIfNotJSReceiver(
isolate()->factory()->NewStringFromAsciiChecked(method_name));
Node* const message_id = SmiConstant(msg_template);
CallRuntime(Runtime::kThrowTypeError, context, message_id, method);
- var_value_map.Bind(UndefinedConstant());
- Goto(&out); // Never reached.
+ Unreachable();
}
Bind(&out);
@@ -271,6 +270,13 @@ void PromiseBuiltinsAssembler::PromiseSetHasHandler(Node* promise) {
StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset, new_flags);
}
+void PromiseBuiltinsAssembler::PromiseSetHandledHint(Node* promise) {
+ Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
+ Node* const new_flags =
+ SmiOr(flags, SmiConstant(1 << JSPromise::kHandledHintBit));
+ StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset, new_flags);
+}
+
Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
Node* default_constructor) {
Isolate* isolate = this->isolate();
@@ -305,10 +311,10 @@ Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
// 7. If IsConstructor(S) is true, return S.
Label throw_error(this);
Node* species_bitfield = LoadMapBitField(LoadMap(species));
- GotoUnless(Word32Equal(Word32And(species_bitfield,
- Int32Constant((1 << Map::kIsConstructor))),
- Int32Constant(1 << Map::kIsConstructor)),
- &throw_error);
+ GotoIfNot(Word32Equal(Word32And(species_bitfield,
+ Int32Constant((1 << Map::kIsConstructor))),
+ Int32Constant(1 << Map::kIsConstructor)),
+ &throw_error);
var_result.Bind(species);
Goto(&out);
@@ -318,7 +324,7 @@ Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
Node* const message_id =
SmiConstant(MessageTemplate::kSpeciesNotConstructor);
CallRuntime(Runtime::kThrowTypeError, context, message_id);
- Goto(&out);
+ Unreachable();
}
Bind(&out);
@@ -413,7 +419,6 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
Node* context, Node* promise, Node* on_resolve, Node* on_reject,
Node* deferred_promise, Node* deferred_on_resolve,
Node* deferred_on_reject) {
- Node* const native_context = LoadNativeContext(context);
Variable var_on_resolve(this, MachineRepresentation::kTagged),
var_on_reject(this, MachineRepresentation::kTagged);
@@ -425,14 +430,16 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
append_callbacks(this);
GotoIf(TaggedIsSmi(on_resolve), &if_onresolvenotcallable);
+ Isolate* isolate = this->isolate();
Node* const on_resolve_map = LoadMap(on_resolve);
Branch(IsCallableMap(on_resolve_map), &onrejectcheck,
&if_onresolvenotcallable);
Bind(&if_onresolvenotcallable);
{
- var_on_resolve.Bind(LoadContextElement(
- native_context, Context::PROMISE_ID_RESOLVE_HANDLER_INDEX));
+ Node* const default_resolve_handler_symbol = HeapConstant(
+ isolate->factory()->promise_default_resolve_handler_symbol());
+ var_on_resolve.Bind(default_resolve_handler_symbol);
Goto(&onrejectcheck);
}
@@ -447,8 +454,9 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
Bind(&if_onrejectnotcallable);
{
- var_on_reject.Bind(LoadContextElement(
- native_context, Context::PROMISE_ID_REJECT_HANDLER_INDEX));
+ Node* const default_reject_handler_symbol = HeapConstant(
+ isolate->factory()->promise_default_reject_handler_symbol());
+ var_on_reject.Bind(default_reject_handler_symbol);
Goto(&append_callbacks);
}
}
@@ -457,8 +465,8 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
{
Label fulfilled_check(this);
Node* const status = LoadObjectField(promise, JSPromise::kStatusOffset);
- GotoUnless(SmiEqual(status, SmiConstant(v8::Promise::kPending)),
- &fulfilled_check);
+ GotoIfNot(SmiEqual(status, SmiConstant(v8::Promise::kPending)),
+ &fulfilled_check);
Node* const existing_deferred_promise =
LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
@@ -561,15 +569,14 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
{
Label reject(this);
Node* const result = LoadObjectField(promise, JSPromise::kResultOffset);
- GotoUnless(WordEqual(status, SmiConstant(v8::Promise::kFulfilled)),
- &reject);
+ GotoIfNot(WordEqual(status, SmiConstant(v8::Promise::kFulfilled)),
+ &reject);
Node* info = AllocatePromiseReactionJobInfo(
result, var_on_resolve.value(), deferred_promise, deferred_on_resolve,
deferred_on_reject, context);
// TODO(gsathya): Move this to TF
- CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, promise, info,
- SmiConstant(v8::Promise::kFulfilled));
+ CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
Goto(&out);
Bind(&reject);
@@ -588,8 +595,7 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
result, var_on_reject.value(), deferred_promise,
deferred_on_resolve, deferred_on_reject, context);
// TODO(gsathya): Move this to TF
- CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, promise,
- info, SmiConstant(v8::Promise::kRejected));
+ CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
Goto(&out);
}
}
@@ -632,7 +638,7 @@ void PromiseBuiltinsAssembler::BranchIfFastPath(Node* native_context,
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
Node* const has_initialmap = WordEqual(map, initial_map);
- GotoUnless(has_initialmap, if_ismodified);
+ GotoIfNot(has_initialmap, if_ismodified);
Node* const initial_proto_initial_map =
LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_MAP_INDEX);
@@ -656,9 +662,6 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobInfo(
info, PromiseResolveThenableJobInfo::kResolveOffset, resolve);
StoreObjectFieldNoWriteBarrier(
info, PromiseResolveThenableJobInfo::kRejectOffset, reject);
- StoreObjectFieldNoWriteBarrier(info,
- PromiseResolveThenableJobInfo::kDebugIdOffset,
- SmiConstant(kDebugPromiseNoID));
StoreObjectFieldNoWriteBarrier(
info, PromiseResolveThenableJobInfo::kContextOffset, context);
return info;
@@ -676,7 +679,7 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
if_rejectpromise(this, Label::kDeferred), out(this);
Label cycle_check(this);
- GotoUnless(IsPromiseHookEnabled(), &cycle_check);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &cycle_check);
CallRuntime(Runtime::kPromiseHookResolve, context, promise);
Goto(&cycle_check);
@@ -686,7 +689,7 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
// 7. If Type(resolution) is not Object, then
GotoIf(TaggedIsSmi(result), &fulfill);
- GotoUnless(IsJSReceiver(result), &fulfill);
+ GotoIfNot(IsJSReceiver(result), &fulfill);
Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
Node* const native_context = LoadNativeContext(context);
@@ -706,8 +709,8 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
LoadObjectField(result, JSPromise::kResultOffset);
Label if_isnotpending(this);
- GotoUnless(SmiEqual(SmiConstant(v8::Promise::kPending), thenable_status),
- &if_isnotpending);
+ GotoIfNot(SmiEqual(SmiConstant(v8::Promise::kPending), thenable_status),
+ &if_isnotpending);
// TODO(gsathya): Use a marker here instead of the actual then
// callback, and check for the marker in PromiseResolveThenableJob
@@ -765,7 +768,7 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
// 11. If IsCallable(thenAction) is false, then
GotoIf(TaggedIsSmi(then), &fulfill);
Node* const then_map = LoadMap(then);
- GotoUnless(IsCallableMap(then_map), &fulfill);
+ GotoIfNot(IsCallableMap(then_map), &fulfill);
var_then.Bind(then);
Goto(&do_enqueue);
}
@@ -784,15 +787,10 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
result, var_then.value(), resolve, reject, context);
Label enqueue(this);
- GotoUnless(IsDebugActive(), &enqueue);
-
- Node* const debug_id =
- CallRuntime(Runtime::kDebugNextAsyncTaskId, context, promise);
- StoreObjectField(info, PromiseResolveThenableJobInfo::kDebugIdOffset,
- debug_id);
+ GotoIfNot(IsDebugActive(), &enqueue);
GotoIf(TaggedIsSmi(result), &enqueue);
- GotoUnless(HasInstanceType(result, JS_PROMISE_TYPE), &enqueue);
+ GotoIfNot(HasInstanceType(result, JS_PROMISE_TYPE), &enqueue);
// Mark the dependency of the new promise on the resolution
Node* const key =
@@ -863,13 +861,12 @@ void PromiseBuiltinsAssembler::PromiseFulfill(
result, tasks, deferred_promise, deferred_on_resolve, deferred_on_reject,
context);
- CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, promise, info,
- status_smi);
+ CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
Goto(&debug_async_event_enqueue_recurring);
Bind(&debug_async_event_enqueue_recurring);
{
- GotoUnless(IsDebugActive(), &do_promisereset);
+ GotoIfNot(IsDebugActive(), &do_promisereset);
CallRuntime(Runtime::kDebugAsyncEventEnqueueRecurring, context, promise,
status_smi);
Goto(&do_promisereset);
@@ -907,8 +904,8 @@ void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
{
Node* executor_type = LoadInstanceType(var_executor.value());
GotoIf(InstanceTypeEqual(executor_type, JS_FUNCTION_TYPE), &found_function);
- GotoUnless(InstanceTypeEqual(executor_type, JS_BOUND_FUNCTION_TYPE),
- &call_runtime);
+ GotoIfNot(InstanceTypeEqual(executor_type, JS_BOUND_FUNCTION_TYPE),
+ &call_runtime);
var_executor.Bind(LoadObjectField(
var_executor.value(), JSBoundFunction::kBoundTargetFunctionOffset));
Goto(&loop_over_bound_function);
@@ -941,8 +938,8 @@ void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
Node* promise, Node* value,
Node* debug_event) {
Label out(this);
- GotoUnless(IsDebugActive(), &out);
- GotoUnless(WordEqual(TrueConstant(), debug_event), &out);
+ GotoIfNot(IsDebugActive(), &out);
+ GotoIfNot(WordEqual(TrueConstant(), debug_event), &out);
CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
Goto(&out);
@@ -958,7 +955,7 @@ void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
Label fulfill(this), report_unhandledpromise(this), run_promise_hook(this);
if (debug_event) {
- GotoUnless(IsDebugActive(), &run_promise_hook);
+ GotoIfNot(IsDebugActive(), &run_promise_hook);
CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
Goto(&run_promise_hook);
} else {
@@ -967,7 +964,7 @@ void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
Bind(&run_promise_hook);
{
- GotoUnless(IsPromiseHookEnabled(), &report_unhandledpromise);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &report_unhandledpromise);
CallRuntime(Runtime::kPromiseHookResolve, context, promise);
Goto(&report_unhandledpromise);
}
@@ -1032,7 +1029,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
GotoIf(TaggedIsSmi(executor), &if_notcallable);
Node* const executor_map = LoadMap(executor);
- GotoUnless(IsCallableMap(executor_map), &if_notcallable);
+ GotoIfNot(IsCallableMap(executor_map), &if_notcallable);
Node* const native_context = LoadNativeContext(context);
Node* const promise_fun =
@@ -1067,7 +1064,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
PromiseInit(instance);
var_result.Bind(instance);
- GotoUnless(IsPromiseHookEnabled(), &debug_push);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &debug_push);
CallRuntime(Runtime::kPromiseHookInit, context, instance,
UndefinedConstant());
Goto(&debug_push);
@@ -1075,7 +1072,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
Bind(&debug_push);
{
- GotoUnless(is_debug_active, &run_executor);
+ GotoIfNot(is_debug_active, &run_executor);
CallRuntime(Runtime::kDebugPushPromise, context, var_result.value());
Goto(&run_executor);
}
@@ -1117,7 +1114,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
{
Node* const message_id = SmiConstant(MessageTemplate::kNotAPromise);
CallRuntime(Runtime::kThrowTypeError, context, message_id, new_target);
- Return(UndefinedConstant()); // Never reached.
+ Unreachable();
}
// 2. If IsCallable(executor) is false, throw a TypeError exception.
@@ -1126,7 +1123,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
Node* const message_id =
SmiConstant(MessageTemplate::kResolverNotAFunction);
CallRuntime(Runtime::kThrowTypeError, context, message_id, executor);
- Return(UndefinedConstant()); // Never reached.
+ Unreachable();
}
// Silently fail if the stack looks fishy.
@@ -1159,23 +1156,6 @@ TF_BUILTIN(IsPromise, PromiseBuiltinsAssembler) {
Return(FalseConstant());
}
-TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) {
- Node* const promise = Parameter(1);
- Node* const on_resolve = Parameter(2);
- Node* const on_reject = Parameter(3);
- Node* const deferred_promise = Parameter(4);
- Node* const context = Parameter(7);
-
- // No deferred_on_resolve/deferred_on_reject because this is just an
- // internal promise created by async-await.
- Node* const result = InternalPerformPromiseThen(
- context, promise, on_resolve, on_reject, deferred_promise,
- UndefinedConstant(), UndefinedConstant());
-
- // TODO(gsathya): This is unused, but value is returned according to spec.
- Return(result);
-}
-
// ES#sec-promise.prototype.then
// Promise.prototype.then ( onFulfilled, onRejected )
TF_BUILTIN(PromiseThen, PromiseBuiltinsAssembler) {
@@ -1273,38 +1253,68 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
Label run_handler(this), if_rejectpromise(this), promisehook_before(this),
promisehook_after(this), debug_pop(this);
- GotoUnless(is_debug_active, &promisehook_before);
+ GotoIfNot(is_debug_active, &promisehook_before);
CallRuntime(Runtime::kDebugPushPromise, context, deferred_promise);
Goto(&promisehook_before);
Bind(&promisehook_before);
{
- GotoUnless(IsPromiseHookEnabled(), &run_handler);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &run_handler);
CallRuntime(Runtime::kPromiseHookBefore, context, deferred_promise);
Goto(&run_handler);
}
Bind(&run_handler);
{
- Callable call_callable = CodeFactory::Call(isolate);
- Node* const result =
- CallJS(call_callable, context, handler, UndefinedConstant(), value);
+ Label if_defaulthandler(this), if_callablehandler(this),
+ if_internalhandler(this), if_customhandler(this, Label::kDeferred);
+ Variable var_result(this, MachineRepresentation::kTagged);
+
+ Branch(IsSymbol(handler), &if_defaulthandler, &if_callablehandler);
+
+ Bind(&if_defaulthandler);
+ {
+ Label if_resolve(this), if_reject(this);
+ Node* const default_resolve_handler_symbol = HeapConstant(
+ isolate->factory()->promise_default_resolve_handler_symbol());
+ Branch(WordEqual(default_resolve_handler_symbol, handler), &if_resolve,
+ &if_reject);
+
+ Bind(&if_resolve);
+ {
+ var_result.Bind(value);
+ Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
+ &if_customhandler);
+ }
- GotoIfException(result, &if_rejectpromise, &var_reason);
+ Bind(&if_reject);
+ {
+ var_reason.Bind(value);
+ Goto(&if_rejectpromise);
+ }
+ }
- Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
- Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
- &if_customhandler);
+ Bind(&if_callablehandler);
+ {
+ Callable call_callable = CodeFactory::Call(isolate);
+ Node* const result =
+ CallJS(call_callable, context, handler, UndefinedConstant(), value);
+ var_result.Bind(result);
+ GotoIfException(result, &if_rejectpromise, &var_reason);
+ Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
+ &if_customhandler);
+ }
Bind(&if_internalhandler);
- InternalResolvePromise(context, deferred_promise, result);
+ InternalResolvePromise(context, deferred_promise, var_result.value());
Goto(&promisehook_after);
Bind(&if_customhandler);
{
+ Callable call_callable = CodeFactory::Call(isolate);
Node* const maybe_exception =
CallJS(call_callable, context, deferred_on_resolve,
- UndefinedConstant(), result);
+ UndefinedConstant(), var_result.value());
GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
Goto(&promisehook_after);
}
@@ -1320,7 +1330,7 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
Bind(&promisehook_after);
{
- GotoUnless(IsPromiseHookEnabled(), &debug_pop);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &debug_pop);
CallRuntime(Runtime::kPromiseHookAfter, context, deferred_promise);
Goto(&debug_pop);
}
@@ -1329,7 +1339,7 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
{
Label out(this);
- GotoUnless(is_debug_active, &out);
+ GotoIfNot(is_debug_active, &out);
CallRuntime(Runtime::kDebugPopPromise, context);
Goto(&out);
@@ -1390,7 +1400,7 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
GotoIf(TaggedIsSmi(value), &if_valueisnotpromise);
// This shortcircuits the constructor lookups.
- GotoUnless(HasInstanceType(value, JS_PROMISE_TYPE), &if_valueisnotpromise);
+ GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &if_valueisnotpromise);
// This adds a fast path as non-subclassed native promises don't have
// an observable constructor lookup.
@@ -1402,7 +1412,7 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
Bind(&if_valueisnativepromise);
{
- GotoUnless(WordEqual(promise_fun, receiver), &if_valueisnotnativepromise);
+ GotoIfNot(WordEqual(promise_fun, receiver), &if_valueisnotnativepromise);
Return(value);
}
@@ -1419,8 +1429,7 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
CallStub(getproperty_callable, context, value, constructor_str);
// 3.b If SameValue(xConstructor, C) is true, return x.
- GotoUnless(SameValue(constructor, receiver, context),
- &if_valueisnotpromise);
+ GotoIfNot(SameValue(constructor, receiver, context), &if_valueisnotpromise);
Return(value);
}
@@ -1485,7 +1494,8 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
Bind(&if_alreadyinvoked);
Node* message = SmiConstant(MessageTemplate::kPromiseExecutorAlreadyInvoked);
- Return(CallRuntime(Runtime::kThrowTypeError, context, message));
+ CallRuntime(Runtime::kThrowTypeError, context, message);
+ Unreachable();
}
TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
@@ -1552,5 +1562,221 @@ TF_BUILTIN(InternalPromiseReject, PromiseBuiltinsAssembler) {
Return(UndefinedConstant());
}
+Node* PromiseBuiltinsAssembler::CreatePromiseFinallyContext(
+ Node* on_finally, Node* native_context) {
+ Node* const context =
+ CreatePromiseContext(native_context, kOnFinallyContextLength);
+ StoreContextElementNoWriteBarrier(context, kOnFinallySlot, on_finally);
+ return context;
+}
+
+std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
+ Node* on_finally, Node* native_context) {
+ Node* const promise_context =
+ CreatePromiseFinallyContext(on_finally, native_context);
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const then_finally_info = LoadContextElement(
+ native_context, Context::PROMISE_THEN_FINALLY_SHARED_FUN);
+ Node* const then_finally = AllocateFunctionWithMapAndContext(
+ map, then_finally_info, promise_context);
+ Node* const catch_finally_info = LoadContextElement(
+ native_context, Context::PROMISE_CATCH_FINALLY_SHARED_FUN);
+ Node* const catch_finally = AllocateFunctionWithMapAndContext(
+ map, catch_finally_info, promise_context);
+ return std::make_pair(then_finally, catch_finally);
+}
+
+TF_BUILTIN(PromiseValueThunkFinally, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(3);
+
+ Node* const value = LoadContextElement(context, kOnFinallySlot);
+ Return(value);
+}
+
+Node* PromiseBuiltinsAssembler::CreateValueThunkFunctionContext(
+ Node* value, Node* native_context) {
+ Node* const context =
+ CreatePromiseContext(native_context, kOnFinallyContextLength);
+ StoreContextElementNoWriteBarrier(context, kOnFinallySlot, value);
+ return context;
+}
+
+Node* PromiseBuiltinsAssembler::CreateValueThunkFunction(Node* value,
+ Node* native_context) {
+ Node* const value_thunk_context =
+ CreateValueThunkFunctionContext(value, native_context);
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const value_thunk_info = LoadContextElement(
+ native_context, Context::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN);
+ Node* const value_thunk = AllocateFunctionWithMapAndContext(
+ map, value_thunk_info, value_thunk_context);
+ return value_thunk;
+}
+
+TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 1);
+
+ Node* const value = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Node* const on_finally = LoadContextElement(context, kOnFinallySlot);
+
+ // 2.a Let result be ? Call(onFinally, undefined).
+ Callable call_callable = CodeFactory::Call(isolate());
+ Node* result =
+ CallJS(call_callable, context, on_finally, UndefinedConstant());
+
+ // 2.b Let promise be ! PromiseResolve( %Promise%, result).
+ Node* const promise = AllocateAndInitJSPromise(context);
+ InternalResolvePromise(context, promise, result);
+
+ // 2.c Let valueThunk be equivalent to a function that returns value.
+ Node* native_context = LoadNativeContext(context);
+ Node* const value_thunk = CreateValueThunkFunction(value, native_context);
+
+ // 2.d Let promiseCapability be ! NewPromiseCapability( %Promise%).
+ Node* const promise_capability = AllocateAndInitJSPromise(context, promise);
+
+ // 2.e Return PerformPromiseThen(promise, valueThunk, undefined,
+ // promiseCapability).
+ InternalPerformPromiseThen(context, promise, value_thunk, UndefinedConstant(),
+ promise_capability, UndefinedConstant(),
+ UndefinedConstant());
+ Return(promise_capability);
+}
+
+TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(3);
+
+ Node* const reason = LoadContextElement(context, kOnFinallySlot);
+ CallRuntime(Runtime::kThrow, context, reason);
+ Unreachable();
+}
+
+Node* PromiseBuiltinsAssembler::CreateThrowerFunctionContext(
+ Node* reason, Node* native_context) {
+ Node* const context =
+ CreatePromiseContext(native_context, kOnFinallyContextLength);
+ StoreContextElementNoWriteBarrier(context, kOnFinallySlot, reason);
+ return context;
+}
+
+Node* PromiseBuiltinsAssembler::CreateThrowerFunction(Node* reason,
+ Node* native_context) {
+ Node* const thrower_context =
+ CreateThrowerFunctionContext(reason, native_context);
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const thrower_info = LoadContextElement(
+ native_context, Context::PROMISE_THROWER_FINALLY_SHARED_FUN);
+ Node* const thrower =
+ AllocateFunctionWithMapAndContext(map, thrower_info, thrower_context);
+ return thrower;
+}
+
+TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 1);
+
+ Node* const reason = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Node* const on_finally = LoadContextElement(context, kOnFinallySlot);
+
+ // 2.a Let result be ? Call(onFinally, undefined).
+ Callable call_callable = CodeFactory::Call(isolate());
+ Node* result =
+ CallJS(call_callable, context, on_finally, UndefinedConstant());
+
+ // 2.b Let promise be ! PromiseResolve( %Promise%, result).
+ Node* const promise = AllocateAndInitJSPromise(context);
+ InternalResolvePromise(context, promise, result);
+
+ // 2.c Let thrower be equivalent to a function that throws reason.
+ Node* native_context = LoadNativeContext(context);
+ Node* const thrower = CreateThrowerFunction(reason, native_context);
+
+ // 2.d Let promiseCapability be ! NewPromiseCapability( %Promise%).
+ Node* const promise_capability = AllocateAndInitJSPromise(context, promise);
+
+ // 2.e Return PerformPromiseThen(promise, thrower, undefined,
+ // promiseCapability).
+ InternalPerformPromiseThen(context, promise, thrower, UndefinedConstant(),
+ promise_capability, UndefinedConstant(),
+ UndefinedConstant());
+ Return(promise_capability);
+}
+
+TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 1);
+
+ // 1. Let promise be the this value.
+ Node* const promise = Parameter(0);
+ Node* const on_finally = Parameter(1);
+ Node* const context = Parameter(4);
+
+ // 2. If IsPromise(promise) is false, throw a TypeError exception.
+ ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
+ "Promise.prototype.finally");
+
+ Variable var_then_finally(this, MachineRepresentation::kTagged),
+ var_catch_finally(this, MachineRepresentation::kTagged);
+
+ Label if_notcallable(this, Label::kDeferred), perform_finally(this);
+
+ // 3. Let thenFinally be ! CreateThenFinally(onFinally).
+ // 4. Let catchFinally be ! CreateCatchFinally(onFinally).
+ GotoIf(TaggedIsSmi(on_finally), &if_notcallable);
+ Node* const on_finally_map = LoadMap(on_finally);
+ GotoIfNot(IsCallableMap(on_finally_map), &if_notcallable);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* then_finally = nullptr;
+ Node* catch_finally = nullptr;
+ std::tie(then_finally, catch_finally) =
+ CreatePromiseFinallyFunctions(on_finally, native_context);
+ var_then_finally.Bind(then_finally);
+ var_catch_finally.Bind(catch_finally);
+ Goto(&perform_finally);
+
+ Bind(&if_notcallable);
+ {
+ var_then_finally.Bind(on_finally);
+ var_catch_finally.Bind(on_finally);
+ Goto(&perform_finally);
+ }
+
+  // 5. Return ? Invoke(promise, "then", « thenFinally, catchFinally »).
+ Bind(&perform_finally);
+ Label if_nativepromise(this), if_custompromise(this, Label::kDeferred);
+ BranchIfFastPath(context, promise, &if_nativepromise, &if_custompromise);
+
+ Bind(&if_nativepromise);
+ {
+ Node* deferred_promise = AllocateAndInitJSPromise(context, promise);
+ InternalPerformPromiseThen(context, promise, var_then_finally.value(),
+ var_catch_finally.value(), deferred_promise,
+ UndefinedConstant(), UndefinedConstant());
+ Return(deferred_promise);
+ }
+
+ Bind(&if_custompromise);
+ {
+ Isolate* isolate = this->isolate();
+ Node* const then_str = HeapConstant(isolate->factory()->then_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const then =
+ CallStub(getproperty_callable, context, promise, then_str);
+ Callable call_callable = CodeFactory::Call(isolate);
+ // 5. Return ? Invoke(promise, "then", « thenFinally, catchFinally »).
+ Node* const result =
+ CallJS(call_callable, context, then, promise, var_then_finally.value(),
+ var_catch_finally.value());
+ Return(result);
+ }
+}
+
} // namespace internal
} // namespace v8
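
The four builtins above (PromiseThenFinally, PromiseCatchFinally, PromiseValueThunkFinally and PromiseThrowerFinally) together implement Promise.prototype.finally. As a reading aid, here is a minimal TypeScript sketch of the equivalent userland semantics, assuming only a spec-compliant Promise; the names are illustrative, not V8's:

function promiseFinally<T>(promise: Promise<T>, onFinally: unknown): Promise<T> {
  // Steps 3/4 above: a non-callable onFinally is passed through as both handlers.
  if (typeof onFinally !== "function") {
    return promise.then(onFinally as any, onFinally as any);
  }
  const fn = onFinally as () => unknown;
  // PromiseThenFinally: call onFinally, resolve its result, then chain a
  // value thunk so the original settled value is preserved.
  const thenFinally = (value: T) => Promise.resolve(fn()).then(() => value);
  // PromiseCatchFinally: same shape, but the chained thrower re-raises reason.
  const catchFinally = (reason: unknown) =>
    Promise.resolve(fn()).then(() => { throw reason; });
  return promise.then(thenFinally, catchFinally);
}
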
diff --git a/deps/v8/src/builtins/builtins-promise.h b/deps/v8/src/builtins/builtins-promise.h
index dee9a075a2..df011822ff 100644
--- a/deps/v8/src/builtins/builtins-promise.h
+++ b/deps/v8/src/builtins/builtins-promise.h
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef V8_BUILTINS_BUILTINS_PROMISE_H_
+#define V8_BUILTINS_BUILTINS_PROMISE_H_
+
#include "src/code-stub-assembler.h"
#include "src/contexts.h"
@@ -33,6 +36,18 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
kCapabilitiesContextLength,
};
+ // This is used by the PromiseThenFinally and PromiseCatchFinally
+ // builtins to store the onFinally in the onFinallySlot.
+ //
+ // This is also used by the PromiseValueThunkFinally to store the
+ // value in the onFinallySlot and PromiseThrowerFinally to store the
+ // reason in the onFinallySlot.
+ enum PromiseFinallyContextSlot {
+ kOnFinallySlot = Context::MIN_CONTEXT_SLOTS,
+
+ kOnFinallyContextLength,
+ };
+
explicit PromiseBuiltinsAssembler(CodeAssemblerState* state)
: CodeStubAssembler(state) {}
// These allocate and initialize a promise with pending state and
@@ -78,6 +93,7 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* default_constructor);
void PromiseSetHasHandler(Node* promise);
+ void PromiseSetHandledHint(Node* promise);
void AppendPromiseCallback(int offset, compiler::Node* promise,
compiler::Node* value);
@@ -111,6 +127,15 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
bool debug_event);
void InternalPromiseReject(Node* context, Node* promise, Node* value,
Node* debug_event);
+ std::pair<Node*, Node*> CreatePromiseFinallyFunctions(Node* on_finally,
+ Node* native_context);
+ Node* CreatePromiseFinallyContext(Node* on_finally, Node* native_context);
+
+ Node* CreateValueThunkFunction(Node* value, Node* native_context);
+ Node* CreateValueThunkFunctionContext(Node* value, Node* native_context);
+
+ Node* CreateThrowerFunctionContext(Node* reason, Node* native_context);
+ Node* CreateThrowerFunction(Node* reason, Node* native_context);
private:
Node* AllocateJSPromise(Node* context);
@@ -118,3 +143,5 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
} // namespace internal
} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_PROMISE_H_
diff --git a/deps/v8/src/builtins/builtins-proxy.cc b/deps/v8/src/builtins/builtins-proxy.cc
index 05ba3041a7..db6f7b57c9 100644
--- a/deps/v8/src/builtins/builtins-proxy.cc
+++ b/deps/v8/src/builtins/builtins-proxy.cc
@@ -5,6 +5,9 @@
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index 64947b1f77..9b29634629 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -5,6 +5,10 @@
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/counters.h"
+#include "src/keys.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
#include "src/property-descriptor.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc
index 2191268441..f76136b806 100644
--- a/deps/v8/src/builtins/builtins-regexp.cc
+++ b/deps/v8/src/builtins/builtins-regexp.cc
@@ -2,11 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-regexp.h"
+
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
+#include "src/objects/regexp-match-info.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-utils.h"
#include "src/string-builder.h"
@@ -14,81 +19,8 @@
namespace v8 {
namespace internal {
-typedef compiler::Node Node;
typedef CodeStubAssembler::ParameterMode ParameterMode;
-typedef compiler::CodeAssemblerState CodeAssemblerState;
-class RegExpBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit RegExpBuiltinsAssembler(CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- Node* FastLoadLastIndex(Node* regexp);
- Node* SlowLoadLastIndex(Node* context, Node* regexp);
- Node* LoadLastIndex(Node* context, Node* regexp, bool is_fastpath);
-
- void FastStoreLastIndex(Node* regexp, Node* value);
- void SlowStoreLastIndex(Node* context, Node* regexp, Node* value);
- void StoreLastIndex(Node* context, Node* regexp, Node* value,
- bool is_fastpath);
-
- Node* ConstructNewResultFromMatchInfo(Node* context, Node* match_info,
- Node* string);
-
- Node* RegExpPrototypeExecBodyWithoutResult(Node* const context,
- Node* const regexp,
- Node* const string,
- Label* if_didnotmatch,
- const bool is_fastpath);
- Node* RegExpPrototypeExecBody(Node* const context, Node* const regexp,
- Node* const string, const bool is_fastpath);
-
- Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
- MessageTemplate::Template msg_template,
- char const* method_name);
-
- Node* IsInitialRegExpMap(Node* context, Node* map);
- void BranchIfFastRegExp(Node* context, Node* map, Label* if_isunmodified,
- Label* if_ismodified);
- void BranchIfFastRegExpResult(Node* context, Node* map,
- Label* if_isunmodified, Label* if_ismodified);
-
- Node* FlagsGetter(Node* const context, Node* const regexp, bool is_fastpath);
-
- Node* FastFlagGetter(Node* const regexp, JSRegExp::Flag flag);
- Node* SlowFlagGetter(Node* const context, Node* const regexp,
- JSRegExp::Flag flag);
- Node* FlagGetter(Node* const context, Node* const regexp, JSRegExp::Flag flag,
- bool is_fastpath);
- void FlagGetter(JSRegExp::Flag flag, v8::Isolate::UseCounterFeature counter,
- const char* method_name);
-
- Node* IsRegExp(Node* const context, Node* const maybe_receiver);
- Node* RegExpInitialize(Node* const context, Node* const regexp,
- Node* const maybe_pattern, Node* const maybe_flags);
-
- Node* RegExpExec(Node* context, Node* regexp, Node* string);
-
- Node* AdvanceStringIndex(Node* const string, Node* const index,
- Node* const is_unicode);
-
- void RegExpPrototypeMatchBody(Node* const context, Node* const regexp,
- Node* const string, const bool is_fastpath);
-
- void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp,
- Node* const string);
- void RegExpPrototypeSearchBodySlow(Node* const context, Node* const regexp,
- Node* const string);
-
- void RegExpPrototypeSplitBody(Node* const context, Node* const regexp,
- Node* const string, Node* const limit);
-
- Node* ReplaceGlobalCallableFastPath(Node* context, Node* regexp, Node* string,
- Node* replace_callable);
- Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp, Node* string,
- Node* replace_string);
-};
// -----------------------------------------------------------------------------
// ES6 section 21.2 RegExp Objects
@@ -141,10 +73,10 @@ void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
}
}
-Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(Node* context,
- Node* match_info,
- Node* string) {
- Label out(this);
+Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
+ Node* const context, Node* const regexp, Node* const match_info,
+ Node* const string) {
+ Label named_captures(this), out(this);
Node* const num_indices = SmiUntag(LoadFixedArrayElement(
match_info, RegExpMatchInfo::kNumberOfCapturesIndex));
@@ -164,17 +96,18 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(Node* context,
StoreFixedArrayElement(result_elements, 0, first, SKIP_WRITE_BARRIER);
- GotoIf(SmiEqual(num_results, SmiConstant(Smi::FromInt(1))), &out);
+ // If no captures exist we can skip named capture handling as well.
+ GotoIf(SmiEqual(num_results, SmiConstant(1)), &out);
// Store all remaining captures.
Node* const limit = IntPtrAdd(
IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), num_indices);
- Variable var_from_cursor(this, MachineType::PointerRepresentation());
- Variable var_to_cursor(this, MachineType::PointerRepresentation());
-
- var_from_cursor.Bind(IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
- var_to_cursor.Bind(IntPtrConstant(1));
+ Variable var_from_cursor(
+ this, MachineType::PointerRepresentation(),
+ IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
+ Variable var_to_cursor(this, MachineType::PointerRepresentation(),
+ IntPtrConstant(1));
Variable* vars[] = {&var_from_cursor, &var_to_cursor};
Label loop(this, 2, vars);
@@ -187,7 +120,7 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(Node* context,
Node* const start = LoadFixedArrayElement(match_info, from_cursor);
Label next_iter(this);
- GotoIf(SmiEqual(start, SmiConstant(Smi::FromInt(-1))), &next_iter);
+ GotoIf(SmiEqual(start, SmiConstant(-1)), &next_iter);
Node* const from_cursor_plus1 = IntPtrAdd(from_cursor, IntPtrConstant(1));
Node* const end = LoadFixedArrayElement(match_info, from_cursor_plus1);
@@ -199,7 +132,83 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(Node* context,
Bind(&next_iter);
var_from_cursor.Bind(IntPtrAdd(from_cursor, IntPtrConstant(2)));
var_to_cursor.Bind(IntPtrAdd(to_cursor, IntPtrConstant(1)));
- Branch(UintPtrLessThan(var_from_cursor.value(), limit), &loop, &out);
+ Branch(UintPtrLessThan(var_from_cursor.value(), limit), &loop,
+ &named_captures);
+ }
+
+ Bind(&named_captures);
+ {
+ // We reach this point only if captures exist, implying that this is an
+ // IRREGEXP JSRegExp.
+
+ CSA_ASSERT(this, HasInstanceType(regexp, JS_REGEXP_TYPE));
+ CSA_ASSERT(this, SmiGreaterThan(num_results, SmiConstant(1)));
+
+ // Preparations for named capture properties. Exit early if the result does
+ // not have any named captures to minimize performance impact.
+
+ Node* const data = LoadObjectField(regexp, JSRegExp::kDataOffset);
+ CSA_ASSERT(this, SmiEqual(LoadFixedArrayElement(data, JSRegExp::kTagIndex),
+ SmiConstant(JSRegExp::IRREGEXP)));
+
+ // The names fixed array associates names at even indices with a capture
+ // index at odd indices.
+ Node* const names =
+ LoadFixedArrayElement(data, JSRegExp::kIrregexpCaptureNameMapIndex);
+ GotoIf(SmiEqual(names, SmiConstant(0)), &out);
+
+ // Allocate a new object to store the named capture properties.
+ // TODO(jgruber): Could be optimized by adding the object map to the heap
+ // root list.
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const map = LoadContextElement(
+ native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP);
+ Node* const properties =
+ AllocateNameDictionary(NameDictionary::kInitialCapacity);
+
+ Node* const group_object = AllocateJSObjectFromMap(map, properties);
+
+ // Store it on the result as a 'group' property.
+
+ {
+ Node* const name = HeapConstant(isolate()->factory()->group_string());
+ CallRuntime(Runtime::kCreateDataProperty, context, result, name,
+ group_object);
+ }
+
+ // One or more named captures exist, add a property for each one.
+
+ CSA_ASSERT(this, HasInstanceType(names, FIXED_ARRAY_TYPE));
+ Node* const names_length = LoadAndUntagFixedArrayBaseLength(names);
+ CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrConstant(0)));
+
+ Variable var_i(this, MachineType::PointerRepresentation());
+ var_i.Bind(IntPtrConstant(0));
+
+ Variable* vars[] = {&var_i};
+ const int vars_count = sizeof(vars) / sizeof(vars[0]);
+ Label loop(this, vars_count, vars);
+
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* const i = var_i.value();
+ Node* const i_plus_1 = IntPtrAdd(i, IntPtrConstant(1));
+ Node* const i_plus_2 = IntPtrAdd(i_plus_1, IntPtrConstant(1));
+
+ Node* const name = LoadFixedArrayElement(names, i);
+ Node* const index = LoadFixedArrayElement(names, i_plus_1);
+ Node* const capture =
+ LoadFixedArrayElement(result_elements, SmiUntag(index));
+
+ CallRuntime(Runtime::kCreateDataProperty, context, group_object, name,
+ capture);
+
+ var_i.Bind(i_plus_2);
+ Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length), &out,
+ &loop);
+ }
}
Bind(&out);
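
The loop above walks the capture-name map two entries at a time (a name at each even index, its capture index at the following odd index) and copies the matching captures onto a null-prototype dictionary object. The same walk in TypeScript, with hypothetical data for /(?<year>\d{4})-(?<month>\d{2})/:

// The name map pairs each capture name with its capture index.
const names: Array<string | number> = ["year", 1, "month", 2];
// result_elements holds the full match first, then the captures.
const resultElements = ["2017-05", "2017", "05"];
const groupObject: Record<string, string> = Object.create(null); // null prototype
for (let i = 0; i < names.length; i += 2) {
  const name = names[i] as string;
  const index = names[i + 1] as number;
  groupObject[name] = resultElements[index];
}
// groupObject.year === "2017", groupObject.month === "05"
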
@@ -232,8 +241,31 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
Variable var_result(this, MachineRepresentation::kTagged);
Label out(this);
- Node* const native_context = LoadNativeContext(context);
- Node* const string_length = LoadStringLength(string);
+ // Load lastIndex.
+ Variable var_lastindex(this, MachineRepresentation::kTagged);
+ {
+ Node* const regexp_lastindex = LoadLastIndex(context, regexp, is_fastpath);
+ var_lastindex.Bind(regexp_lastindex);
+
+ if (is_fastpath) {
+ // ToLength on a positive smi is a nop and can be skipped.
+ CSA_ASSERT(this, TaggedIsPositiveSmi(regexp_lastindex));
+ } else {
+ // Omit ToLength if lastindex is a non-negative smi.
+ Label call_tolength(this, Label::kDeferred), next(this);
+ Branch(TaggedIsPositiveSmi(regexp_lastindex), &next, &call_tolength);
+
+ Bind(&call_tolength);
+ {
+ Callable tolength_callable = CodeFactory::ToLength(isolate);
+ var_lastindex.Bind(
+ CallStub(tolength_callable, context, regexp_lastindex));
+ Goto(&next);
+ }
+
+ Bind(&next);
+ }
+ }
// Check whether the regexp is global or sticky, which determines whether we
// update last index later on.
@@ -245,38 +277,18 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
// Grab and possibly update last index.
Label run_exec(this);
- Variable var_lastindex(this, MachineRepresentation::kTagged);
{
Label if_doupdate(this), if_dontupdate(this);
Branch(should_update_last_index, &if_doupdate, &if_dontupdate);
Bind(&if_doupdate);
{
- Node* const regexp_lastindex =
- LoadLastIndex(context, regexp, is_fastpath);
- var_lastindex.Bind(regexp_lastindex);
-
- // Omit ToLength if lastindex is a non-negative smi.
- {
- Label call_tolength(this, Label::kDeferred), next(this);
- Branch(TaggedIsPositiveSmi(regexp_lastindex), &next, &call_tolength);
-
- Bind(&call_tolength);
- {
- Callable tolength_callable = CodeFactory::ToLength(isolate);
- var_lastindex.Bind(
- CallStub(tolength_callable, context, regexp_lastindex));
- Goto(&next);
- }
-
- Bind(&next);
- }
-
Node* const lastindex = var_lastindex.value();
Label if_isoob(this, Label::kDeferred);
- GotoUnless(TaggedIsSmi(lastindex), &if_isoob);
- GotoUnless(SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
+ GotoIfNot(TaggedIsSmi(lastindex), &if_isoob);
+ Node* const string_length = LoadStringLength(string);
+ GotoIfNot(SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
Goto(&run_exec);
Bind(&if_isoob);
@@ -299,6 +311,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
Bind(&run_exec);
{
// Get last match info from the context.
+ Node* const native_context = LoadNativeContext(context);
Node* const last_match_info = LoadContextElement(
native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
@@ -310,9 +323,9 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
// {match_indices} is either null or the RegExpMatchInfo array.
// Return early if exec failed, possibly updating last index.
- GotoUnless(WordEqual(match_indices, null), &successful_match);
+ GotoIfNot(WordEqual(match_indices, null), &successful_match);
- GotoUnless(should_update_last_index, if_didnotmatch);
+ GotoIfNot(should_update_last_index, if_didnotmatch);
StoreLastIndex(context, regexp, smi_zero, is_fastpath);
Goto(if_didnotmatch);
@@ -320,7 +333,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
Bind(&successful_match);
{
- GotoUnless(should_update_last_index, &out);
+ GotoIfNot(should_update_last_index, &out);
// Update the new last index from {match_indices}.
Node* const new_lastindex = LoadFixedArrayElement(
@@ -352,7 +365,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(Node* const context,
{
Node* const match_indices = indices_or_null;
Node* const result =
- ConstructNewResultFromMatchInfo(context, match_indices, string);
+ ConstructNewResultFromMatchInfo(context, regexp, match_indices, string);
var_result.Bind(result);
Goto(&out);
}
@@ -393,15 +406,18 @@ Node* RegExpBuiltinsAssembler::ThrowIfNotJSReceiver(
CallRuntime(Runtime::kThrowTypeError, context, message_id, method_name_str,
value_str);
- var_value_map.Bind(UndefinedConstant());
- Goto(&out); // Never reached.
+ Unreachable();
}
Bind(&out);
return var_value_map.value();
}
-Node* RegExpBuiltinsAssembler::IsInitialRegExpMap(Node* context, Node* map) {
+Node* RegExpBuiltinsAssembler::IsInitialRegExpMap(Node* context, Node* object,
+ Node* map) {
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kWord32);
+
Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
@@ -409,16 +425,33 @@ Node* RegExpBuiltinsAssembler::IsInitialRegExpMap(Node* context, Node* map) {
LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
Node* const has_initialmap = WordEqual(map, initial_map);
- return has_initialmap;
+ var_result.Bind(has_initialmap);
+ GotoIfNot(has_initialmap, &out);
+
+ // The smi check is required to omit ToLength(lastIndex) calls with possible
+ // user-code execution on the fast path.
+ Node* const last_index = FastLoadLastIndex(object);
+ var_result.Bind(TaggedIsPositiveSmi(last_index));
+ Goto(&out);
+
+ Bind(&out);
+ return var_result.value();
}
// RegExp fast path implementations rely on unmodified JSRegExp instances.
// We use a fairly coarse granularity for this and simply check whether both
-// the regexp itself is unmodified (i.e. its map has not changed) and its
-// prototype is unmodified.
-void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* context, Node* map,
- Label* if_isunmodified,
- Label* if_ismodified) {
+// the regexp itself is unmodified (i.e. its map has not changed), its
+// prototype is unmodified, and lastIndex is a non-negative smi.
+void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* const context,
+ Node* const object,
+ Node* const map,
+ Label* const if_isunmodified,
+ Label* const if_ismodified) {
+ CSA_ASSERT(this, WordEqual(LoadMap(object), map));
+
+ // TODO(ishell): Update this check once map changes for constant field
+ // tracking are landing.
+
Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
@@ -426,7 +459,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* context, Node* map,
LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
Node* const has_initialmap = WordEqual(map, initial_map);
- GotoUnless(has_initialmap, if_ismodified);
+ GotoIfNot(has_initialmap, if_ismodified);
Node* const initial_proto_initial_map =
LoadContextElement(native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
@@ -434,10 +467,32 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* context, Node* map,
Node* const proto_has_initialmap =
WordEqual(proto_map, initial_proto_initial_map);
- // TODO(ishell): Update this check once map changes for constant field
- // tracking are landing.
+ GotoIfNot(proto_has_initialmap, if_ismodified);
+
+ // The smi check is required to omit ToLength(lastIndex) calls with possible
+ // user-code execution on the fast path.
+ Node* const last_index = FastLoadLastIndex(object);
+ Branch(TaggedIsPositiveSmi(last_index), if_isunmodified, if_ismodified);
+}
+
+Node* RegExpBuiltinsAssembler::IsFastRegExpMap(Node* const context,
+ Node* const object,
+ Node* const map) {
+ Label yup(this), nope(this), out(this);
+ Variable var_result(this, MachineRepresentation::kWord32);
+
+ BranchIfFastRegExp(context, object, map, &yup, &nope);
+
+ Bind(&yup);
+ var_result.Bind(Int32Constant(1));
+ Goto(&out);
+
+ Bind(&nope);
+ var_result.Bind(Int32Constant(0));
+ Goto(&out);
- Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+ Bind(&out);
+ return var_result.value();
}
void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* context, Node* map,
@@ -459,16 +514,16 @@ TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
Node* const context = Parameter(4);
// Ensure {maybe_receiver} is a JSRegExp.
- Node* const regexp_map = ThrowIfNotInstanceType(
- context, maybe_receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
+ ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE,
+ "RegExp.prototype.exec");
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
Node* const string = ToString(context, maybe_string);
Label if_isfastpath(this), if_isslowpath(this);
- Branch(IsInitialRegExpMap(context, regexp_map), &if_isfastpath,
- &if_isslowpath);
+ Branch(IsInitialRegExpMap(context, receiver, LoadMap(receiver)),
+ &if_isfastpath, &if_isslowpath);
Bind(&if_isfastpath);
{
@@ -492,14 +547,12 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Node* const int_zero = IntPtrConstant(0);
Node* const int_one = IntPtrConstant(1);
- Variable var_length(this, MachineType::PointerRepresentation());
+ Variable var_length(this, MachineType::PointerRepresentation(), int_zero);
Variable var_flags(this, MachineType::PointerRepresentation());
// First, count the number of characters we will need and check which flags
// are set.
- var_length.Bind(int_zero);
-
if (is_fastpath) {
// Refer to JSRegExp's flag property on the fast-path.
Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
@@ -509,7 +562,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
#define CASE_FOR_FLAG(FLAG) \
do { \
Label next(this); \
- GotoUnless(IsSetWord(flags_intptr, FLAG), &next); \
+ GotoIfNot(IsSetWord(flags_intptr, FLAG), &next); \
var_length.Bind(IntPtrAdd(var_length.value(), int_one)); \
Goto(&next); \
Bind(&next); \
@@ -559,14 +612,14 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Node* const result = AllocateSeqOneByteString(context, var_length.value());
Node* const flags_intptr = var_flags.value();
- Variable var_offset(this, MachineType::PointerRepresentation());
- var_offset.Bind(
+ Variable var_offset(
+ this, MachineType::PointerRepresentation(),
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
#define CASE_FOR_FLAG(FLAG, CHAR) \
do { \
Label next(this); \
- GotoUnless(IsSetWord(flags_intptr, FLAG), &next); \
+ GotoIfNot(IsSetWord(flags_intptr, FLAG), &next); \
Node* const value = Int32Constant(CHAR); \
StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
var_offset.value(), value); \
@@ -591,11 +644,10 @@ Node* RegExpBuiltinsAssembler::IsRegExp(Node* const context,
Node* const maybe_receiver) {
Label out(this), if_isregexp(this);
- Variable var_result(this, MachineRepresentation::kWord32);
- var_result.Bind(Int32Constant(0));
+ Variable var_result(this, MachineRepresentation::kWord32, Int32Constant(0));
GotoIf(TaggedIsSmi(maybe_receiver), &out);
- GotoUnless(IsJSReceiver(maybe_receiver), &out);
+ GotoIfNot(IsJSReceiver(maybe_receiver), &out);
Node* const receiver = maybe_receiver;
@@ -657,7 +709,8 @@ TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
Label if_isfastpath(this), if_isslowpath(this, Label::kDeferred);
- Branch(IsInitialRegExpMap(context, map), &if_isfastpath, &if_isslowpath);
+ Branch(IsInitialRegExpMap(context, receiver, map), &if_isfastpath,
+ &if_isslowpath);
Bind(&if_isfastpath);
Return(FlagsGetter(context, receiver, true));
@@ -676,13 +729,9 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
Isolate* isolate = this->isolate();
- Variable var_flags(this, MachineRepresentation::kTagged);
- Variable var_pattern(this, MachineRepresentation::kTagged);
- Variable var_new_target(this, MachineRepresentation::kTagged);
-
- var_flags.Bind(flags);
- var_pattern.Bind(pattern);
- var_new_target.Bind(new_target);
+ Variable var_flags(this, MachineRepresentation::kTagged, flags);
+ Variable var_pattern(this, MachineRepresentation::kTagged, pattern);
+ Variable var_new_target(this, MachineRepresentation::kTagged, new_target);
Node* const native_context = LoadNativeContext(context);
Node* const regexp_function =
@@ -693,17 +742,17 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
{
Label next(this);
- GotoUnless(IsUndefined(new_target), &next);
+ GotoIfNot(IsUndefined(new_target), &next);
var_new_target.Bind(regexp_function);
- GotoUnless(pattern_is_regexp, &next);
- GotoUnless(IsUndefined(flags), &next);
+ GotoIfNot(pattern_is_regexp, &next);
+ GotoIfNot(IsUndefined(flags), &next);
Callable getproperty_callable = CodeFactory::GetProperty(isolate);
Node* const name = HeapConstant(isolate->factory()->constructor_string());
Node* const value = CallStub(getproperty_callable, context, pattern, name);
- GotoUnless(WordEqual(value, regexp_function), &next);
+ GotoIfNot(WordEqual(value, regexp_function), &next);
Return(pattern);
Bind(&next);
@@ -725,7 +774,7 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
{
Label inner_next(this);
- GotoUnless(IsUndefined(flags), &inner_next);
+ GotoIfNot(IsUndefined(flags), &inner_next);
Node* const value = FlagsGetter(context, pattern, true);
var_flags.Bind(value);
@@ -750,7 +799,7 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
{
Label inner_next(this);
- GotoUnless(IsUndefined(flags), &inner_next);
+ GotoIfNot(IsUndefined(flags), &inner_next);
Node* const name = HeapConstant(isolate->factory()->flags_string());
Node* const value =
@@ -814,18 +863,15 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
"RegExp.prototype.compile");
Node* const receiver = maybe_receiver;
- Variable var_flags(this, MachineRepresentation::kTagged);
- Variable var_pattern(this, MachineRepresentation::kTagged);
-
- var_flags.Bind(maybe_flags);
- var_pattern.Bind(maybe_pattern);
+ Variable var_flags(this, MachineRepresentation::kTagged, maybe_flags);
+ Variable var_pattern(this, MachineRepresentation::kTagged, maybe_pattern);
// Handle a JSRegExp pattern.
{
Label next(this);
GotoIf(TaggedIsSmi(maybe_pattern), &next);
- GotoUnless(HasInstanceType(maybe_pattern, JS_REGEXP_TYPE), &next);
+ GotoIfNot(HasInstanceType(maybe_pattern, JS_REGEXP_TYPE), &next);
Node* const pattern = maybe_pattern;
@@ -1075,7 +1121,7 @@ void RegExpBuiltinsAssembler::FlagGetter(JSRegExp::Flag flag,
isolate->factory()->NewStringFromAsciiChecked(method_name));
CallRuntime(Runtime::kThrowTypeError, context, message_id,
method_name_str);
- Return(UndefinedConstant()); // Never reached.
+ Unreachable();
}
}
}
@@ -1204,7 +1250,7 @@ Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
Label out(this), if_isfastpath(this), if_isslowpath(this);
Node* const map = LoadMap(regexp);
- BranchIfFastRegExp(context, map, &if_isfastpath, &if_isslowpath);
+ BranchIfFastRegExp(context, regexp, map, &if_isfastpath, &if_isslowpath);
Bind(&if_isfastpath);
{
@@ -1269,16 +1315,17 @@ TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
Node* const context = Parameter(4);
// Ensure {maybe_receiver} is a JSReceiver.
- Node* const map = ThrowIfNotJSReceiver(
- context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.test");
+ ThrowIfNotJSReceiver(context, maybe_receiver,
+ MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.test");
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
Node* const string = ToString(context, maybe_string);
Label fast_path(this), slow_path(this);
- BranchIfFastRegExp(context, map, &fast_path, &slow_path);
+ BranchIfFastRegExp(context, receiver, LoadMap(receiver), &fast_path,
+ &slow_path);
Bind(&fast_path);
{
@@ -1305,33 +1352,54 @@ TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
Node* const index,
- Node* const is_unicode) {
- Variable var_result(this, MachineRepresentation::kTagged);
+ Node* const is_unicode,
+ bool is_fastpath) {
+ CSA_ASSERT(this, IsHeapNumberMap(LoadReceiverMap(index)));
+ if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index));
// Default to last_index + 1.
- Node* const index_plus_one = SmiAdd(index, SmiConstant(1));
- var_result.Bind(index_plus_one);
+ Node* const index_plus_one = NumberInc(index);
+ Variable var_result(this, MachineRepresentation::kTagged, index_plus_one);
+
+ // Advancing the index has some subtle issues involving the distinction
+  // between Smis and HeapNumbers. There are three cases:
+ // * {index} is a Smi, {index_plus_one} is a Smi. The standard case.
+ // * {index} is a Smi, {index_plus_one} overflows into a HeapNumber.
+ // In this case we can return the result early, because
+ // {index_plus_one} > {string}.length.
+ // * {index} is a HeapNumber, {index_plus_one} is a HeapNumber. This can only
+ // occur when {index} is outside the Smi range since we normalize
+ // explicitly. Again we can return early.
+ if (is_fastpath) {
+ // Must be in Smi range on the fast path. We control the value of {index}
+ // on all call-sites and can never exceed the length of the string.
+ STATIC_ASSERT(String::kMaxLength + 2 < Smi::kMaxValue);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(index_plus_one));
+ }
Label if_isunicode(this), out(this);
- Branch(is_unicode, &if_isunicode, &out);
+ GotoIfNot(is_unicode, &out);
+
+ // Keep this unconditional (even on the fast path) just to be safe.
+ Branch(TaggedIsPositiveSmi(index_plus_one), &if_isunicode, &out);
Bind(&if_isunicode);
{
Node* const string_length = LoadStringLength(string);
- GotoUnless(SmiLessThan(index_plus_one, string_length), &out);
+ GotoIfNot(SmiLessThan(index_plus_one, string_length), &out);
Node* const lead = StringCharCodeAt(string, index);
- GotoUnless(Word32Equal(Word32And(lead, Int32Constant(0xFC00)),
- Int32Constant(0xD800)),
- &out);
+ GotoIfNot(Word32Equal(Word32And(lead, Int32Constant(0xFC00)),
+ Int32Constant(0xD800)),
+ &out);
Node* const trail = StringCharCodeAt(string, index_plus_one);
- GotoUnless(Word32Equal(Word32And(trail, Int32Constant(0xFC00)),
- Int32Constant(0xDC00)),
- &out);
+ GotoIfNot(Word32Equal(Word32And(trail, Int32Constant(0xFC00)),
+ Int32Constant(0xDC00)),
+ &out);
// At a surrogate pair, return index + 2.
- Node* const index_plus_two = SmiAdd(index, SmiConstant(2));
+ Node* const index_plus_two = NumberInc(index_plus_one);
var_result.Bind(index_plus_two);
Goto(&out);
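
The surrogate-pair handling above is compact enough to obscure the decision tree; the same logic in plain TypeScript, as a sketch over UTF-16 code units rather than V8's internal string representation:

function advanceStringIndex(s: string, index: number, unicode: boolean): number {
  const next = index + 1; // default: advance by one code unit
  if (!unicode || next >= s.length) return next;
  const lead = s.charCodeAt(index);
  if ((lead & 0xfc00) !== 0xd800) return next; // not a lead surrogate
  const trail = s.charCodeAt(next);
  if ((trail & 0xfc00) !== 0xdc00) return next; // lead without a trail
  return index + 2; // a surrogate pair advances by two code units
}
// advanceStringIndex("a\u{1F600}b", 1, true) === 3
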
@@ -1597,7 +1665,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Bind(&if_didnotmatch);
{
// Return null if there were no matches, otherwise just exit the loop.
- GotoUnless(IntPtrEqual(array.length(), int_zero), &out);
+ GotoIfNot(IntPtrEqual(array.length(), int_zero), &out);
Return(null);
}
@@ -1612,15 +1680,26 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// Advance last index if the match is the empty string.
Node* const match_length = LoadStringLength(match);
- GotoUnless(SmiEqual(match_length, smi_zero), &loop);
+ GotoIfNot(SmiEqual(match_length, smi_zero), &loop);
Node* last_index = LoadLastIndex(context, regexp, is_fastpath);
-
- Callable tolength_callable = CodeFactory::ToLength(isolate);
- last_index = CallStub(tolength_callable, context, last_index);
+ if (is_fastpath) {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
+ } else {
+ Callable tolength_callable = CodeFactory::ToLength(isolate);
+ last_index = CallStub(tolength_callable, context, last_index);
+ }
Node* const new_last_index =
- AdvanceStringIndex(string, last_index, is_unicode);
+ AdvanceStringIndex(string, last_index, is_unicode, is_fastpath);
+
+ if (is_fastpath) {
+ // On the fast path, we can be certain that lastIndex can never be
+ // incremented to overflow the Smi range since the maximal string
+ // length is less than the maximal Smi value.
+ STATIC_ASSERT(String::kMaxLength < Smi::kMaxValue);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(new_last_index));
+ }
StoreLastIndex(context, regexp, new_last_index, is_fastpath);
@@ -1646,16 +1725,17 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
Node* const context = Parameter(4);
// Ensure {maybe_receiver} is a JSReceiver.
- Node* const map = ThrowIfNotJSReceiver(
- context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@match");
+ ThrowIfNotJSReceiver(context, maybe_receiver,
+ MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@match");
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
Node* const string = ToString(context, maybe_string);
Label fast_path(this), slow_path(this);
- BranchIfFastRegExp(context, map, &fast_path, &slow_path);
+ BranchIfFastRegExp(context, receiver, LoadMap(receiver), &fast_path,
+ &slow_path);
Bind(&fast_path);
RegExpPrototypeMatchBody(context, receiver, string, true);
@@ -1734,7 +1814,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
// Return -1 if no match was found.
{
Label next(this);
- GotoUnless(WordEqual(exec_result, NullConstant()), &next);
+ GotoIfNot(WordEqual(exec_result, NullConstant()), &next);
Return(SmiConstant(-1));
Bind(&next);
}
@@ -1771,16 +1851,17 @@ TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
Node* const context = Parameter(4);
// Ensure {maybe_receiver} is a JSReceiver.
- Node* const map = ThrowIfNotJSReceiver(
- context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@search");
+ ThrowIfNotJSReceiver(context, maybe_receiver,
+ MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@search");
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
Node* const string = ToString(context, maybe_string);
Label fast_path(this), slow_path(this);
- BranchIfFastRegExp(context, map, &fast_path, &slow_path);
+ BranchIfFastRegExp(context, receiver, LoadMap(receiver), &fast_path,
+ &slow_path);
Bind(&fast_path);
RegExpPrototypeSearchBodyFast(context, receiver, string);
@@ -1919,12 +2000,12 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
{
Label next(this);
- GotoUnless(SmiEqual(match_to, next_search_from), &next);
- GotoUnless(SmiEqual(match_to, last_matched_until), &next);
+ GotoIfNot(SmiEqual(match_to, next_search_from), &next);
+ GotoIfNot(SmiEqual(match_to, last_matched_until), &next);
Node* const is_unicode = FastFlagGetter(regexp, JSRegExp::kUnicode);
Node* const new_next_search_from =
- AdvanceStringIndex(string, next_search_from, is_unicode);
+ AdvanceStringIndex(string, next_search_from, is_unicode, true);
var_next_search_from.Bind(new_next_search_from);
Goto(&loop);
@@ -2035,6 +2116,69 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
}
}
+// Helper that skips a few initial checks.
+TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
+ typedef RegExpSplitDescriptor Descriptor;
+
+ Node* const regexp = Parameter(Descriptor::kReceiver);
+ Node* const string = Parameter(Descriptor::kString);
+ Node* const maybe_limit = Parameter(Descriptor::kLimit);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, IsFastRegExpMap(context, regexp, LoadMap(regexp)));
+ CSA_ASSERT(this, IsString(string));
+
+ // TODO(jgruber): Even if map checks send us to the fast path, we still need
+ // to verify the constructor property and jump to the slow path if it has
+ // been changed.
+
+ // Convert {maybe_limit} to a uint32, capping at the maximal smi value.
+ Variable var_limit(this, MachineRepresentation::kTagged, maybe_limit);
+ Label if_limitissmimax(this), limit_done(this), runtime(this);
+
+ GotoIf(IsUndefined(maybe_limit), &if_limitissmimax);
+ GotoIf(TaggedIsPositiveSmi(maybe_limit), &limit_done);
+
+ Node* const limit = ToUint32(context, maybe_limit);
+ {
+ // ToUint32(limit) could potentially change the shape of the RegExp
+ // object. Recheck that we are still on the fast path and bail to runtime
+ // otherwise.
+ {
+ Label next(this);
+ BranchIfFastRegExp(context, regexp, LoadMap(regexp), &next, &runtime);
+ Bind(&next);
+ }
+
+ GotoIfNot(TaggedIsSmi(limit), &if_limitissmimax);
+
+ var_limit.Bind(limit);
+ Goto(&limit_done);
+ }
+
+ Bind(&if_limitissmimax);
+ {
+ // TODO(jgruber): In this case, we can probably avoid generation of limit
+ // checks in Generate_RegExpPrototypeSplitBody.
+ var_limit.Bind(SmiConstant(Smi::kMaxValue));
+ Goto(&limit_done);
+ }
+
+ Bind(&limit_done);
+ {
+ Node* const limit = var_limit.value();
+ RegExpPrototypeSplitBody(context, regexp, string, limit);
+ }
+
+ Bind(&runtime);
+ {
+ // The runtime call passes in limit to ensure the second ToUint32(limit)
+ // call is not observable.
+ CSA_ASSERT(this, IsHeapNumberMap(LoadReceiverMap(limit)));
+ Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string, limit));
+ }
+}
+
// ES#sec-regexp.prototype-@@split
// RegExp.prototype [ @@split ] ( string, limit )
TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
@@ -2044,59 +2188,24 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
Node* const context = Parameter(5);
// Ensure {maybe_receiver} is a JSReceiver.
- Node* const map = ThrowIfNotJSReceiver(
- context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@split");
+ ThrowIfNotJSReceiver(context, maybe_receiver,
+ MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@split");
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
Node* const string = ToString(context, maybe_string);
- Label fast_path(this), slow_path(this);
- BranchIfFastRegExp(context, map, &fast_path, &slow_path);
-
- Bind(&fast_path);
- {
- // TODO(jgruber): Even if map checks send us to the fast path, we still need
- // to verify the constructor property and jump to the slow path if it has
- // been changed.
-
- // Convert {maybe_limit} to a uint32, capping at the maximal smi value.
- Variable var_limit(this, MachineRepresentation::kTagged);
- Label if_limitissmimax(this), limit_done(this);
-
- GotoIf(IsUndefined(maybe_limit), &if_limitissmimax);
-
- {
- Node* const limit = ToUint32(context, maybe_limit);
- GotoUnless(TaggedIsSmi(limit), &if_limitissmimax);
-
- var_limit.Bind(limit);
- Goto(&limit_done);
- }
-
- Bind(&if_limitissmimax);
- {
- // TODO(jgruber): In this case, we can probably generation of limit checks
-      // in Generate_RegExpPrototypeSplitBody.
- Node* const smi_max = SmiConstant(Smi::kMaxValue);
- var_limit.Bind(smi_max);
- Goto(&limit_done);
- }
+ Label stub(this), runtime(this, Label::kDeferred);
+ BranchIfFastRegExp(context, receiver, LoadMap(receiver), &stub, &runtime);
- Bind(&limit_done);
- {
- Node* const limit = var_limit.value();
- RegExpPrototypeSplitBody(context, receiver, string, limit);
- }
- }
+ Bind(&stub);
+ Callable split_callable = CodeFactory::RegExpSplit(isolate());
+ Return(CallStub(split_callable, context, receiver, string, maybe_limit));
- Bind(&slow_path);
- {
- Node* const result = CallRuntime(Runtime::kRegExpSplit, context, receiver,
- string, maybe_limit);
- Return(result);
- }
+ Bind(&runtime);
+ Return(CallRuntime(Runtime::kRegExpSplit, context, receiver, string,
+ maybe_limit));
}
Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
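
The recheck after ToUint32(limit) in RegExpSplit above exists because the conversion can run arbitrary user code. A hedged sketch of the hazard it closes, using a limit with a valueOf hook:

const re = /,/;
const evilLimit = {
  valueOf() {
    // Runs during ToUint32(limit), i.e. after the initial fast-path check.
    (re as any).exec = () => null; // own property added: the map changes
    return 2;
  },
};
// @@split must notice the modification and fall back to the runtime path.
"a,b,c".split(re, evilLimit as any);
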
@@ -2185,7 +2294,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Bind(&loop);
{
Node* const i = var_i.value();
- GotoUnless(IntPtrLessThan(i, end), &create_result);
+ GotoIfNot(IntPtrLessThan(i, end), &create_result);
Node* const elem = LoadFixedArrayElement(res_elems, i);
@@ -2257,7 +2366,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
const int increment = 1;
BuildFastLoop(
- MachineType::PointerRepresentation(), from, to,
+ from, to,
[this, res_elems, isolate, native_context, context, undefined,
replace_callable](Node* index) {
Node* const elem = LoadFixedArrayElement(res_elems, index);
@@ -2288,7 +2397,8 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Goto(&do_continue);
Bind(&do_continue);
},
- increment, CodeStubAssembler::IndexAdvanceMode::kPost);
+ increment, CodeStubAssembler::INTPTR_PARAMETERS,
+ CodeStubAssembler::IndexAdvanceMode::kPost);
Goto(&create_result);
}
@@ -2312,9 +2422,6 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
// ToString({replace_value}) does not contain '$', i.e. we're doing a simple
// string replacement.
- Isolate* const isolate = this->isolate();
-
- Node* const null = NullConstant();
Node* const int_zero = IntPtrConstant(0);
Node* const smi_zero = SmiConstant(Smi::kZero);
@@ -2347,21 +2454,11 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
Bind(&if_isnonglobal);
{
// Run exec, then manually construct the resulting string.
- Callable exec_callable = CodeFactory::RegExpExec(isolate);
- Node* const match_indices = CallStub(exec_callable, context, regexp, string,
- smi_zero, last_match_info);
-
- Label if_matched(this), if_didnotmatch(this);
- Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
-
- Bind(&if_didnotmatch);
- {
- FastStoreLastIndex(regexp, smi_zero);
- var_result.Bind(string);
- Goto(&out);
- }
+ Label if_didnotmatch(this);
+ Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
+ context, regexp, string, &if_didnotmatch, true);
- Bind(&if_matched);
+ // Successful match.
{
Node* const subject_start = smi_zero;
Node* const match_start = LoadFixedArrayElement(
@@ -2405,57 +2502,60 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
Goto(&out);
}
}
+
+ Bind(&if_didnotmatch);
+ {
+ var_result.Bind(string);
+ Goto(&out);
+ }
}
Bind(&out);
return var_result.value();
}
-// ES#sec-regexp.prototype-@@replace
-// RegExp.prototype [ @@replace ] ( string, replaceValue )
-TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
- Node* const maybe_receiver = Parameter(0);
- Node* const maybe_string = Parameter(1);
- Node* const replace_value = Parameter(2);
- Node* const context = Parameter(5);
-
- // Ensure {maybe_receiver} is a JSReceiver.
- Node* const map = ThrowIfNotJSReceiver(
- context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@replace");
- Node* const receiver = maybe_receiver;
+// Helper that skips a few initial checks.
+TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
+ typedef RegExpReplaceDescriptor Descriptor;
- // Convert {maybe_string} to a String.
- Callable tostring_callable = CodeFactory::ToString(isolate());
- Node* const string = CallStub(tostring_callable, context, maybe_string);
+ Node* const regexp = Parameter(Descriptor::kReceiver);
+ Node* const string = Parameter(Descriptor::kString);
+ Node* const replace_value = Parameter(Descriptor::kReplaceValue);
+ Node* const context = Parameter(Descriptor::kContext);
- // Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
- Label checkreplacecallable(this), runtime(this, Label::kDeferred),
- fastpath(this);
- BranchIfFastRegExp(context, map, &checkreplacecallable, &runtime);
+ CSA_ASSERT(this, IsFastRegExpMap(context, regexp, LoadMap(regexp)));
+ CSA_ASSERT(this, IsString(string));
- Bind(&checkreplacecallable);
- Node* const regexp = receiver;
+ Label checkreplacestring(this), if_iscallable(this),
+ runtime(this, Label::kDeferred);
// 2. Is {replace_value} callable?
- Label checkreplacestring(this), if_iscallable(this);
GotoIf(TaggedIsSmi(replace_value), &checkreplacestring);
-
- Node* const replace_value_map = LoadMap(replace_value);
- Branch(IsCallableMap(replace_value_map), &if_iscallable, &checkreplacestring);
+ Branch(IsCallableMap(LoadMap(replace_value)), &if_iscallable,
+ &checkreplacestring);
// 3. Does ToString({replace_value}) contain '$'?
Bind(&checkreplacestring);
{
+ Callable tostring_callable = CodeFactory::ToString(isolate());
Node* const replace_string =
CallStub(tostring_callable, context, replace_value);
- Node* const dollar_char = Int32Constant('$');
- Node* const smi_minusone = SmiConstant(Smi::FromInt(-1));
- GotoUnless(SmiEqual(StringIndexOfChar(context, replace_string, dollar_char,
- SmiConstant(0)),
- smi_minusone),
- &runtime);
+ // ToString(replaceValue) could potentially change the shape of the RegExp
+ // object. Recheck that we are still on the fast path and bail to runtime
+ // otherwise.
+ {
+ Label next(this);
+ BranchIfFastRegExp(context, regexp, LoadMap(regexp), &next, &runtime);
+ Bind(&next);
+ }
+
+ Callable indexof_callable = CodeFactory::StringIndexOf(isolate());
+ Node* const dollar_string = HeapConstant(
+ isolate()->factory()->LookupSingleCharacterStringFromCode('$'));
+ Node* const dollar_ix = CallStub(indexof_callable, context, replace_string,
+ dollar_string, SmiConstant(0));
+ GotoIfNot(SmiEqual(dollar_ix, SmiConstant(-1)), &runtime);
Return(
ReplaceSimpleStringFastPath(context, regexp, string, replace_string));
@@ -2464,35 +2564,74 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
// {regexp} is unmodified and {replace_value} is callable.
Bind(&if_iscallable);
{
- Node* const replace_callable = replace_value;
+ Node* const replace_fn = replace_value;
// Check if the {regexp} is global.
Label if_isglobal(this), if_isnotglobal(this);
+
Node* const is_global = FastFlagGetter(regexp, JSRegExp::kGlobal);
Branch(is_global, &if_isglobal, &if_isnotglobal);
Bind(&if_isglobal);
- {
- Node* const result = ReplaceGlobalCallableFastPath(
- context, regexp, string, replace_callable);
- Return(result);
- }
+ Return(ReplaceGlobalCallableFastPath(context, regexp, string, replace_fn));
Bind(&if_isnotglobal);
- {
- Node* const result =
- CallRuntime(Runtime::kStringReplaceNonGlobalRegExpWithFunction,
- context, string, regexp, replace_callable);
- Return(result);
- }
+ Return(CallRuntime(Runtime::kStringReplaceNonGlobalRegExpWithFunction,
+ context, string, regexp, replace_fn));
}
Bind(&runtime);
- {
- Node* const result = CallRuntime(Runtime::kRegExpReplace, context, receiver,
- string, replace_value);
- Return(result);
- }
+ Return(CallRuntime(Runtime::kRegExpReplace, context, regexp, string,
+ replace_value));
+}
+
+// ES#sec-regexp.prototype-@@replace
+// RegExp.prototype [ @@replace ] ( string, replaceValue )
+TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const replace_value = Parameter(2);
+ Node* const context = Parameter(5);
+
+ // RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
+ //
+ // if (!IsFastRegExp(receiver)) CallRuntime(RegExpReplace)
+ // if (IsCallable(replace)) {
+ // if (IsGlobal(receiver)) {
+ // // Called 'fast-path' but contains several runtime calls.
+ // ReplaceGlobalCallableFastPath()
+ // } else {
+ // CallRuntime(StringReplaceNonGlobalRegExpWithFunction)
+ // }
+ // } else {
+ // if (replace.contains("$")) {
+ // CallRuntime(RegExpReplace)
+ // } else {
+ // ReplaceSimpleStringFastPath() // Bails to runtime for global regexps.
+ // }
+ // }
+
+ // Ensure {maybe_receiver} is a JSReceiver.
+ ThrowIfNotJSReceiver(context, maybe_receiver,
+ MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@replace");
+ Node* const receiver = maybe_receiver;
+
+ // Convert {maybe_string} to a String.
+ Callable tostring_callable = CodeFactory::ToString(isolate());
+ Node* const string = CallStub(tostring_callable, context, maybe_string);
+
+ // Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
+ Label stub(this), runtime(this, Label::kDeferred);
+ BranchIfFastRegExp(context, receiver, LoadMap(receiver), &stub, &runtime);
+
+ Bind(&stub);
+ Callable replace_callable = CodeFactory::RegExpReplace(isolate());
+ Return(CallStub(replace_callable, context, receiver, string, replace_value));
+
+ Bind(&runtime);
+ Return(CallRuntime(Runtime::kRegExpReplace, context, receiver, string,
+ replace_value));
}
// Simple string matching functionality for internal use which does not modify
@@ -2522,7 +2661,7 @@ TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
Bind(&if_matched);
{
Node* result =
- ConstructNewResultFromMatchInfo(context, match_indices, string);
+ ConstructNewResultFromMatchInfo(context, regexp, match_indices, string);
Return(result);
}
}
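
The dispatch summary in RegExpPrototypeReplace above is easiest to see from the caller's side. A few illustrative calls (standard JavaScript behavior, written as TypeScript, not V8-specific):

// No '$' in the replacement: eligible for ReplaceSimpleStringFastPath.
"2017-05".replace(/(\d+)-(\d+)/, "x");       // "x"
// '$' substitution patterns: bails out to the RegExpReplace runtime.
"2017-05".replace(/(\d+)-(\d+)/, "$2/$1");   // "05/2017"
// Callable replacement on a global regexp: ReplaceGlobalCallableFastPath.
"a1b2".replace(/\d/g, (d) => String(Number(d) * 2)); // "a2b4"
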
diff --git a/deps/v8/src/builtins/builtins-regexp.h b/deps/v8/src/builtins/builtins-regexp.h
new file mode 100644
index 0000000000..9e1bfdf48f
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-regexp.h
@@ -0,0 +1,99 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_REGEXP_H_
+#define V8_BUILTINS_BUILTINS_REGEXP_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+
+class RegExpBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit RegExpBuiltinsAssembler(CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ void BranchIfFastRegExp(Node* const context, Node* const object,
+ Node* const map, Label* const if_isunmodified,
+ Label* const if_ismodified);
+
+ protected:
+ Node* FastLoadLastIndex(Node* regexp);
+ Node* SlowLoadLastIndex(Node* context, Node* regexp);
+ Node* LoadLastIndex(Node* context, Node* regexp, bool is_fastpath);
+
+ void FastStoreLastIndex(Node* regexp, Node* value);
+ void SlowStoreLastIndex(Node* context, Node* regexp, Node* value);
+ void StoreLastIndex(Node* context, Node* regexp, Node* value,
+ bool is_fastpath);
+
+ Node* ConstructNewResultFromMatchInfo(Node* const context, Node* const regexp,
+ Node* const match_info,
+ Node* const string);
+
+ Node* RegExpPrototypeExecBodyWithoutResult(Node* const context,
+ Node* const regexp,
+ Node* const string,
+ Label* if_didnotmatch,
+ const bool is_fastpath);
+ Node* RegExpPrototypeExecBody(Node* const context, Node* const regexp,
+ Node* const string, const bool is_fastpath);
+
+ Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
+ MessageTemplate::Template msg_template,
+ char const* method_name);
+
+ // Analogous to BranchIfFastRegExp, for use in asserts.
+ Node* IsFastRegExpMap(Node* const context, Node* const object,
+ Node* const map);
+
+ Node* IsInitialRegExpMap(Node* context, Node* object, Node* map);
+ void BranchIfFastRegExpResult(Node* context, Node* map,
+ Label* if_isunmodified, Label* if_ismodified);
+
+ Node* FlagsGetter(Node* const context, Node* const regexp, bool is_fastpath);
+
+ Node* FastFlagGetter(Node* const regexp, JSRegExp::Flag flag);
+ Node* SlowFlagGetter(Node* const context, Node* const regexp,
+ JSRegExp::Flag flag);
+ Node* FlagGetter(Node* const context, Node* const regexp, JSRegExp::Flag flag,
+ bool is_fastpath);
+ void FlagGetter(JSRegExp::Flag flag, v8::Isolate::UseCounterFeature counter,
+ const char* method_name);
+
+ Node* IsRegExp(Node* const context, Node* const maybe_receiver);
+ Node* RegExpInitialize(Node* const context, Node* const regexp,
+ Node* const maybe_pattern, Node* const maybe_flags);
+
+ Node* RegExpExec(Node* context, Node* regexp, Node* string);
+
+ Node* AdvanceStringIndex(Node* const string, Node* const index,
+ Node* const is_unicode, bool is_fastpath);
+
+ void RegExpPrototypeMatchBody(Node* const context, Node* const regexp,
+ Node* const string, const bool is_fastpath);
+
+ void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp,
+ Node* const string);
+ void RegExpPrototypeSearchBodySlow(Node* const context, Node* const regexp,
+ Node* const string);
+
+ void RegExpPrototypeSplitBody(Node* const context, Node* const regexp,
+ Node* const string, Node* const limit);
+
+ Node* ReplaceGlobalCallableFastPath(Node* context, Node* regexp, Node* string,
+ Node* replace_callable);
+ Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp, Node* string,
+ Node* replace_string);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_REGEXP_H_
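
Most members declared above come in Fast/Slow pairs selected by an is_fastpath flag: the fast variant reads a field at a known offset of an unmodified RegExp, while the slow variant performs a generic, observable property access. A minimal standalone sketch of that dispatch shape, using illustrative C++ stand-ins rather than V8 types:

#include <cassert>
#include <map>
#include <string>

// Illustrative stand-ins for JSRegExp receivers (not V8 types).
struct FastRegExp { int last_index = 0; };      // unmodified instance
using SlowRegExp = std::map<std::string, int>;  // arbitrary receiver

int FastLoadLastIndex(const FastRegExp& re) {
  // Fast path: the field lives at a known offset, no lookup needed.
  return re.last_index;
}

int SlowLoadLastIndex(const SlowRegExp& re) {
  // Slow path: generic property lookup ("lastIndex" may be redefined).
  auto it = re.find("lastIndex");
  return it == re.end() ? 0 : it->second;
}

int LoadLastIndex(const void* re, bool is_fastpath) {
  // Mirrors LoadLastIndex(context, regexp, is_fastpath) above.
  return is_fastpath ? FastLoadLastIndex(*static_cast<const FastRegExp*>(re))
                     : SlowLoadLastIndex(*static_cast<const SlowRegExp*>(re));
}

int main() {
  FastRegExp fast;
  fast.last_index = 3;
  SlowRegExp slow{{"lastIndex", 7}};
  assert(LoadLastIndex(&fast, true) == 3);
  assert(LoadLastIndex(&slow, false) == 7);
}
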
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 53caf1fe21..b91807833f 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -78,7 +78,7 @@ void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
a->Bind(&invalid);
a->CallRuntime(Runtime::kThrowNotIntegerSharedTypedArrayError, context,
tagged);
- a->Return(a->UndefinedConstant());
+ a->Unreachable();
a->Bind(&not_float_or_clamped);
*out_instance_type = elements_instance_type;
@@ -129,8 +129,8 @@ compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
}
a->Bind(&if_indexesarenotequal);
- a->Return(
- a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
+ a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
+ a->Unreachable();
}
a->Bind(&done);
@@ -149,8 +149,8 @@ void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
a->Int32GreaterThanOrEqual(index_word, array_length_word)),
&if_notinbounds, &if_inbounds);
a->Bind(&if_notinbounds);
- a->Return(
- a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
+ a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
+ a->Unreachable();
a->Bind(&if_inbounds);
}
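
The change in these hunks replaces a fabricated Return after a throwing runtime call with Unreachable(), documenting that control never falls through. The same idea in plain C++, with illustrative names:

#include <cstdio>
#include <stdexcept>

// [[noreturn]] mirrors what Unreachable() records in CSA: the throwing
// runtime call never returns, so no fake return value is needed after it.
[[noreturn]] void ThrowInvalidAtomicAccessIndexError() {
  throw std::runtime_error("invalid atomic access index");
}

int ValidateAtomicIndex(int index, int length) {
  if (index < 0 || index >= length) {
    ThrowInvalidAtomicAccessIndexError();
    // Control never reaches here; the old code faked the equivalent of
    // `return undefined;` on this dead path, which only obscured that.
  }
  return index;
}

int main() {
  try {
    ValidateAtomicIndex(5, 3);
  } catch (const std::runtime_error& e) {
    std::printf("threw: %s\n", e.what());
  }
}
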
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 3259d0021a..7cef567cf9 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -2,11 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-regexp.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
#include "src/regexp/regexp-utils.h"
+#include "src/string-case.h"
+#include "src/unicode-inl.h"
+#include "src/unicode.h"
namespace v8 {
namespace internal {
@@ -20,13 +27,47 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
+ Node* DirectStringData(Node* string, Node* string_instance_type) {
+ // Compute the effective offset of the first character.
+ Variable var_data(this, MachineType::PointerRepresentation());
+ Label if_sequential(this), if_external(this), if_join(this);
+ Branch(Word32Equal(Word32And(string_instance_type,
+ Int32Constant(kStringRepresentationMask)),
+ Int32Constant(kSeqStringTag)),
+ &if_sequential, &if_external);
+
+ Bind(&if_sequential);
+ {
+ var_data.Bind(IntPtrAdd(
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
+ BitcastTaggedToWord(string)));
+ Goto(&if_join);
+ }
+
+ Bind(&if_external);
+ {
+ // This is only valid for ExternalStrings where the resource data
+ // pointer is cached (i.e. no short external strings).
+ CSA_ASSERT(this, Word32NotEqual(
+ Word32And(string_instance_type,
+ Int32Constant(kShortExternalStringMask)),
+ Int32Constant(kShortExternalStringTag)));
+ var_data.Bind(LoadObjectField(string, ExternalString::kResourceDataOffset,
+ MachineType::Pointer()));
+ Goto(&if_join);
+ }
+
+ Bind(&if_join);
+ return var_data.value();
+ }
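
DirectStringData above returns the address of the first character either from a fixed header offset (sequential strings) or from a cached resource pointer (non-short external strings). A self-contained sketch of the two cases, with toy layouts that are not V8's object model:

#include <cassert>
#include <cstdint>

// Toy layouts (not the real V8 object model).
struct SeqString {              // character data stored inline
  int32_t length;
  char data[16];
};
struct ExternalString {         // character data behind a cached pointer
  int32_t length;
  const char* resource_data;
};

const char* DirectStringData(const void* str, bool is_sequential) {
  if (is_sequential) {
    // Sequential: data begins at a fixed header offset inside the object,
    // like IntPtrAdd(string, SeqOneByteString::kHeaderSize - kHeapObjectTag).
    return static_cast<const SeqString*>(str)->data;
  }
  // External (non-short): the data pointer is cached in the object, like
  // LoadObjectField(string, ExternalString::kResourceDataOffset).
  return static_cast<const ExternalString*>(str)->resource_data;
}

int main() {
  SeqString seq{3, "abc"};
  ExternalString ext{3, "xyz"};
  assert(DirectStringData(&seq, true)[0] == 'a');
  assert(DirectStringData(&ext, false)[0] == 'x');
}
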
+
Node* LoadOneByteChar(Node* string, Node* index) {
return Load(MachineType::Uint8(), string, OneByteCharOffset(index));
}
Node* OneByteCharAddress(Node* string, Node* index) {
Node* offset = OneByteCharOffset(index);
- return IntPtrAdd(BitcastTaggedToWord(string), offset);
+ return IntPtrAdd(string, offset);
}
Node* OneByteCharOffset(Node* index) {
@@ -43,14 +84,25 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
return offset;
}
- void BranchIfSimpleOneByteStringInstanceType(Node* instance_type,
- Label* if_true,
- Label* if_false) {
+ void DispatchOnStringInstanceType(Node* const instance_type,
+ Label* if_onebyte_sequential,
+ Label* if_onebyte_external,
+ Label* if_otherwise) {
const int kMask = kStringRepresentationMask | kStringEncodingMask;
- const int kType = kOneByteStringTag | kSeqStringTag;
- Branch(Word32Equal(Word32And(instance_type, Int32Constant(kMask)),
- Int32Constant(kType)),
- if_true, if_false);
+ Node* const encoding_and_representation =
+ Word32And(instance_type, Int32Constant(kMask));
+
+ int32_t values[] = {
+ kOneByteStringTag | kSeqStringTag,
+ kOneByteStringTag | kExternalStringTag,
+ };
+ Label* labels[] = {
+ if_onebyte_sequential, if_onebyte_external,
+ };
+ STATIC_ASSERT(arraysize(values) == arraysize(labels));
+
+ Switch(encoding_and_representation, if_otherwise, values, labels,
+ arraysize(values));
}
void GenerateStringEqual(ResultMode mode);
@@ -60,6 +112,36 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Node* LoadSurrogatePairAt(Node* string, Node* length, Node* index,
UnicodeEncoding encoding);
+
+ void StringIndexOf(Node* receiver, Node* instance_type, Node* search_string,
+ Node* search_string_instance_type, Node* position,
+ std::function<void(Node*)> f_return);
+
+ Node* IsNullOrUndefined(Node* const value);
+ void RequireObjectCoercible(Node* const context, Node* const value,
+ const char* method_name);
+
+ Node* SmiIsNegative(Node* const value) {
+ return SmiLessThan(value, SmiConstant(0));
+ }
+
+ // Implements boilerplate logic for {match, split, replace, search} of the
+ // form:
+ //
+ // if (!IS_NULL_OR_UNDEFINED(object)) {
+ // var maybe_function = object[symbol];
+ // if (!IS_UNDEFINED(maybe_function)) {
+ // return %_Call(maybe_function, ...);
+ // }
+ // }
+ //
+ // Contains fast paths for Smi and RegExp objects.
+ typedef std::function<Node*()> NodeFunction0;
+ typedef std::function<Node*(Node* fn)> NodeFunction1;
+ void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
+ Handle<Symbol> symbol,
+ const NodeFunction0& regexp_call,
+ const NodeFunction1& generic_call);
};
void StringBuiltinsAssembler::GenerateStringEqual(ResultMode mode) {
@@ -77,13 +159,23 @@ void StringBuiltinsAssembler::GenerateStringEqual(ResultMode mode) {
// }
// return true;
// }
+ // if (lhs and/or rhs are indirect strings) {
+ // unwrap them and restart from the beginning;
+ // }
// return %StringEqual(lhs, rhs);
- Node* lhs = Parameter(0);
- Node* rhs = Parameter(1);
+ Variable var_left(this, MachineRepresentation::kTagged);
+ Variable var_right(this, MachineRepresentation::kTagged);
+ var_left.Bind(Parameter(0));
+ var_right.Bind(Parameter(1));
Node* context = Parameter(2);
- Label if_equal(this), if_notequal(this);
+ Variable* input_vars[2] = {&var_left, &var_right};
+ Label if_equal(this), if_notequal(this), restart(this, 2, input_vars);
+ Goto(&restart);
+ Bind(&restart);
+ Node* lhs = var_left.value();
+ Node* rhs = var_right.value();
// Fast check to see if {lhs} and {rhs} refer to the same String object.
GotoIf(WordEqual(lhs, rhs), &if_equal);
@@ -114,43 +206,45 @@ void StringBuiltinsAssembler::GenerateStringEqual(ResultMode mode) {
Int32Constant(kBothInternalizedTag)),
&if_notequal);
- // Check that both {lhs} and {rhs} are flat one-byte strings.
- int const kBothSeqOneByteStringMask =
- kStringEncodingMask | kStringRepresentationMask |
- ((kStringEncodingMask | kStringRepresentationMask) << 8);
- int const kBothSeqOneByteStringTag =
- kOneByteStringTag | kSeqStringTag |
- ((kOneByteStringTag | kSeqStringTag) << 8);
- Label if_bothonebyteseqstrings(this), if_notbothonebyteseqstrings(this);
+ // Check that both {lhs} and {rhs} are flat one-byte strings, and that
+  // in the case of ExternalStrings the data pointer is cached.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ int const kBothDirectOneByteStringMask =
+ kStringEncodingMask | kIsIndirectStringMask | kShortExternalStringMask |
+ ((kStringEncodingMask | kIsIndirectStringMask | kShortExternalStringMask)
+ << 8);
+ int const kBothDirectOneByteStringTag =
+ kOneByteStringTag | (kOneByteStringTag << 8);
+ Label if_bothdirectonebytestrings(this), if_notbothdirectonebytestrings(this);
Branch(Word32Equal(Word32And(both_instance_types,
- Int32Constant(kBothSeqOneByteStringMask)),
- Int32Constant(kBothSeqOneByteStringTag)),
- &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+ Int32Constant(kBothDirectOneByteStringMask)),
+ Int32Constant(kBothDirectOneByteStringTag)),
+ &if_bothdirectonebytestrings, &if_notbothdirectonebytestrings);
- Bind(&if_bothonebyteseqstrings);
+ Bind(&if_bothdirectonebytestrings);
{
// Compute the effective offset of the first character.
- Node* begin =
- IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ Node* lhs_data = DirectStringData(lhs, lhs_instance_type);
+ Node* rhs_data = DirectStringData(rhs, rhs_instance_type);
// Compute the first offset after the string from the length.
- Node* end = IntPtrAdd(begin, SmiUntag(lhs_length));
+ Node* length = SmiUntag(lhs_length);
// Loop over the {lhs} and {rhs} strings to see if they are equal.
Variable var_offset(this, MachineType::PointerRepresentation());
Label loop(this, &var_offset);
- var_offset.Bind(begin);
+ var_offset.Bind(IntPtrConstant(0));
Goto(&loop);
Bind(&loop);
{
// If {offset} equals {end}, no difference was found, so the
// strings are equal.
Node* offset = var_offset.value();
- GotoIf(WordEqual(offset, end), &if_equal);
+ GotoIf(WordEqual(offset, length), &if_equal);
// Load the next characters from {lhs} and {rhs}.
- Node* lhs_value = Load(MachineType::Uint8(), lhs, offset);
- Node* rhs_value = Load(MachineType::Uint8(), rhs, offset);
+ Node* lhs_value = Load(MachineType::Uint8(), lhs_data, offset);
+ Node* rhs_value = Load(MachineType::Uint8(), rhs_data, offset);
// Check if the characters match.
GotoIf(Word32NotEqual(lhs_value, rhs_value), &if_notequal);
@@ -159,34 +253,44 @@ void StringBuiltinsAssembler::GenerateStringEqual(ResultMode mode) {
var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
Goto(&loop);
}
- }
+ }
- Bind(&if_notbothonebyteseqstrings);
- {
- // TODO(bmeurer): Add fast case support for flattened cons strings;
- // also add support for two byte string equality checks.
- Runtime::FunctionId function_id =
- (mode == ResultMode::kDontNegateResult)
- ? Runtime::kStringEqual
- : Runtime::kStringNotEqual;
- TailCallRuntime(function_id, context, lhs, rhs);
- }
+ Bind(&if_notbothdirectonebytestrings);
+ {
+ // Try to unwrap indirect strings, restart the above attempt on success.
+ MaybeDerefIndirectStrings(&var_left, lhs_instance_type, &var_right,
+ rhs_instance_type, &restart);
+ // TODO(bmeurer): Add support for two byte string equality checks.
+
+ Runtime::FunctionId function_id = (mode == ResultMode::kDontNegateResult)
+ ? Runtime::kStringEqual
+ : Runtime::kStringNotEqual;
+ TailCallRuntime(function_id, context, lhs, rhs);
+ }
- Bind(&if_equal);
- Return(BooleanConstant(mode == ResultMode::kDontNegateResult));
+ Bind(&if_equal);
+ Return(BooleanConstant(mode == ResultMode::kDontNegateResult));
- Bind(&if_notequal);
- Return(BooleanConstant(mode == ResultMode::kNegateResult));
+ Bind(&if_notequal);
+ Return(BooleanConstant(mode == ResultMode::kNegateResult));
}
void StringBuiltinsAssembler::GenerateStringRelationalComparison(
RelationalComparisonMode mode) {
- Node* lhs = Parameter(0);
- Node* rhs = Parameter(1);
+ Variable var_left(this, MachineRepresentation::kTagged);
+ Variable var_right(this, MachineRepresentation::kTagged);
+ var_left.Bind(Parameter(0));
+ var_right.Bind(Parameter(1));
Node* context = Parameter(2);
+ Variable* input_vars[2] = {&var_left, &var_right};
Label if_less(this), if_equal(this), if_greater(this);
+ Label restart(this, 2, input_vars);
+ Goto(&restart);
+ Bind(&restart);
+ Node* lhs = var_left.value();
+ Node* rhs = var_right.value();
// Fast check to see if {lhs} and {rhs} refer to the same String object.
GotoIf(WordEqual(lhs, rhs), &if_equal);
@@ -274,8 +378,10 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(
Bind(&if_notbothonebyteseqstrings);
{
- // TODO(bmeurer): Add fast case support for flattened cons strings;
- // also add support for two byte string relational comparisons.
+ // Try to unwrap indirect strings, restart the above attempt on success.
+ MaybeDerefIndirectStrings(&var_left, lhs_instance_type, &var_right,
+ rhs_instance_type, &restart);
+ // TODO(bmeurer): Add support for two byte string relational comparisons.
switch (mode) {
case RelationalComparisonMode::kLessThan:
TailCallRuntime(Runtime::kStringLessThan, context, lhs, rhs);
@@ -364,8 +470,7 @@ TF_BUILTIN(StringCharAt, CodeStubAssembler) {
Node* position = Parameter(1);
// Load the character code at the {position} from the {receiver}.
- Node* code = StringCharCodeAt(receiver, position,
- CodeStubAssembler::INTPTR_PARAMETERS);
+ Node* code = StringCharCodeAt(receiver, position, INTPTR_PARAMETERS);
// And return the single character string with only that {code}
Node* result = StringFromCharCode(code);
@@ -377,8 +482,7 @@ TF_BUILTIN(StringCharCodeAt, CodeStubAssembler) {
Node* position = Parameter(1);
// Load the character code at the {position} from the {receiver}.
- Node* code = StringCharCodeAt(receiver, position,
- CodeStubAssembler::INTPTR_PARAMETERS);
+ Node* code = StringCharCodeAt(receiver, position, INTPTR_PARAMETERS);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
@@ -394,7 +498,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
- CodeStubArguments arguments(this, argc);
+ CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
// From now on use word-size argc value.
argc = arguments.GetLength();
@@ -606,7 +710,7 @@ TF_BUILTIN(StringPrototypeCharAt, CodeStubAssembler) {
Label return_emptystring(this, Label::kDeferred);
position =
ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
- GotoUnless(TaggedIsSmi(position), &return_emptystring);
+ GotoIfNot(TaggedIsSmi(position), &return_emptystring);
// Determine the actual length of the {receiver} String.
Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
@@ -645,7 +749,7 @@ TF_BUILTIN(StringPrototypeCharCodeAt, CodeStubAssembler) {
Label return_nan(this, Label::kDeferred);
position =
ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
- GotoUnless(TaggedIsSmi(position), &return_nan);
+ GotoIfNot(TaggedIsSmi(position), &return_nan);
// Determine the actual length of the {receiver} String.
Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
@@ -763,7 +867,146 @@ BUILTIN(StringPrototypeIncludes) {
return *isolate->factory()->ToBoolean(index_in_str != -1);
}
-// ES6 #sec-string.prototype.indexof
+void StringBuiltinsAssembler::StringIndexOf(
+ Node* receiver, Node* instance_type, Node* search_string,
+ Node* search_string_instance_type, Node* position,
+ std::function<void(Node*)> f_return) {
+ CSA_ASSERT(this, IsString(receiver));
+ CSA_ASSERT(this, IsString(search_string));
+ CSA_ASSERT(this, TaggedIsSmi(position));
+
+ Label zero_length_needle(this),
+ call_runtime_unchecked(this, Label::kDeferred), return_minus_1(this),
+ check_search_string(this), continue_fast_path(this);
+
+ Node* const int_zero = IntPtrConstant(0);
+ Variable var_needle_byte(this, MachineType::PointerRepresentation(),
+ int_zero);
+ Variable var_string_addr(this, MachineType::PointerRepresentation(),
+ int_zero);
+
+ Node* needle_length = SmiUntag(LoadStringLength(search_string));
+ // Use faster/complex runtime fallback for long search strings.
+ GotoIf(IntPtrLessThan(IntPtrConstant(1), needle_length),
+ &call_runtime_unchecked);
+ Node* string_length = SmiUntag(LoadStringLength(receiver));
+ Node* start_position = IntPtrMax(SmiUntag(position), int_zero);
+
+ GotoIf(IntPtrEqual(int_zero, needle_length), &zero_length_needle);
+ // Check that the needle fits in the start position.
+ GotoIfNot(IntPtrLessThanOrEqual(needle_length,
+ IntPtrSub(string_length, start_position)),
+ &return_minus_1);
+
+ // Load the string address.
+ {
+ Label if_onebyte_sequential(this);
+ Label if_onebyte_external(this, Label::kDeferred);
+
+ // Only support one-byte strings on the fast path.
+ DispatchOnStringInstanceType(instance_type, &if_onebyte_sequential,
+ &if_onebyte_external, &call_runtime_unchecked);
+
+ Bind(&if_onebyte_sequential);
+ {
+ var_string_addr.Bind(
+ OneByteCharAddress(BitcastTaggedToWord(receiver), start_position));
+ Goto(&check_search_string);
+ }
+
+ Bind(&if_onebyte_external);
+ {
+ Node* const unpacked = TryDerefExternalString(receiver, instance_type,
+ &call_runtime_unchecked);
+ var_string_addr.Bind(OneByteCharAddress(unpacked, start_position));
+ Goto(&check_search_string);
+ }
+ }
+
+ // Load the needle character.
+ Bind(&check_search_string);
+ {
+ Label if_onebyte_sequential(this);
+ Label if_onebyte_external(this, Label::kDeferred);
+
+ DispatchOnStringInstanceType(search_string_instance_type,
+ &if_onebyte_sequential, &if_onebyte_external,
+ &call_runtime_unchecked);
+
+ Bind(&if_onebyte_sequential);
+ {
+ var_needle_byte.Bind(
+ ChangeInt32ToIntPtr(LoadOneByteChar(search_string, int_zero)));
+ Goto(&continue_fast_path);
+ }
+
+ Bind(&if_onebyte_external);
+ {
+ Node* const unpacked = TryDerefExternalString(
+ search_string, search_string_instance_type, &call_runtime_unchecked);
+ var_needle_byte.Bind(
+ ChangeInt32ToIntPtr(LoadOneByteChar(unpacked, int_zero)));
+ Goto(&continue_fast_path);
+ }
+ }
+
+ Bind(&continue_fast_path);
+ {
+ Node* needle_byte = var_needle_byte.value();
+ Node* string_addr = var_string_addr.value();
+ Node* search_length = IntPtrSub(string_length, start_position);
+ // Call out to the highly optimized memchr to perform the actual byte
+ // search.
+ Node* memchr =
+ ExternalConstant(ExternalReference::libc_memchr_function(isolate()));
+ Node* result_address =
+ CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::IntPtr(), MachineType::UintPtr(), memchr,
+ string_addr, needle_byte, search_length);
+ GotoIf(WordEqual(result_address, int_zero), &return_minus_1);
+ Node* result_index =
+ IntPtrAdd(IntPtrSub(result_address, string_addr), start_position);
+ f_return(SmiTag(result_index));
+ }
+
+ Bind(&return_minus_1);
+ f_return(SmiConstant(-1));
+
+ Bind(&zero_length_needle);
+ {
+ Comment("0-length search_string");
+ f_return(SmiTag(IntPtrMin(string_length, start_position)));
+ }
+
+ Bind(&call_runtime_unchecked);
+ {
+ // Simplified version of the runtime call where the types of the arguments
+ // are already known due to type checks in this stub.
+ Comment("Call Runtime Unchecked");
+ Node* result = CallRuntime(Runtime::kStringIndexOfUnchecked, SmiConstant(0),
+ receiver, search_string, position);
+ f_return(result);
+ }
+}
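
The core of the fast path above is a single memchr call plus pointer arithmetic to turn the hit address back into a string index. A standalone restatement over a plain char buffer (the dispatch on string representation is omitted):

#include <cassert>
#include <cstring>

// Find a single-character needle with memchr and translate the returned
// pointer back into an index, as StringIndexOf does above.
int IndexOfChar(const char* data, int length, char needle, int start) {
  if (start < 0) start = 0;                   // IntPtrMax(position, 0)
  if (start >= length) return -1;
  const void* hit = memchr(data + start, needle, length - start);
  if (hit == nullptr) return -1;              // return_minus_1
  // result index = hit address - base address
  return static_cast<int>(static_cast<const char*>(hit) - data);
}

int main() {
  const char* s = "hello world";
  assert(IndexOfChar(s, 11, 'o', 0) == 4);
  assert(IndexOfChar(s, 11, 'o', 5) == 7);   // honors the start position
  assert(IndexOfChar(s, 11, 'z', 0) == -1);
}
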
+
+// ES6 String.prototype.indexOf(searchString [, position])
+// #sec-string.prototype.indexof
+// Unchecked helper for builtins lowering.
+TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
+ Node* receiver = Parameter(0);
+ Node* search_string = Parameter(1);
+ Node* position = Parameter(2);
+
+ Node* instance_type = LoadInstanceType(receiver);
+ Node* search_string_instance_type = LoadInstanceType(search_string);
+
+ StringIndexOf(receiver, instance_type, search_string,
+ search_string_instance_type, position,
+ [this](Node* result) { this->Return(result); });
+}
+
+// ES6 String.prototype.indexOf(searchString [, position])
+// #sec-string.prototype.indexof
TF_BUILTIN(StringPrototypeIndexOf, StringBuiltinsAssembler) {
Variable search_string(this, MachineRepresentation::kTagged),
position(this, MachineRepresentation::kTagged);
@@ -774,7 +1017,7 @@ TF_BUILTIN(StringPrototypeIndexOf, StringBuiltinsAssembler) {
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
- CodeStubArguments arguments(this, argc);
+ CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
Node* receiver = arguments.GetReceiver();
// From now on use word-size argc value.
argc = arguments.GetLength();
@@ -802,74 +1045,27 @@ TF_BUILTIN(StringPrototypeIndexOf, StringBuiltinsAssembler) {
Comment("2 Argument case");
search_string.Bind(arguments.AtIndex(0));
position.Bind(arguments.AtIndex(1));
- GotoUnless(TaggedIsSmi(position.value()), &call_runtime);
- position.Bind(SmiMax(position.value(), SmiConstant(0)));
+ GotoIfNot(TaggedIsSmi(position.value()), &call_runtime);
Goto(&fast_path);
}
Bind(&fast_path);
{
Comment("Fast Path");
- Label zero_length_needle(this);
GotoIf(TaggedIsSmi(receiver), &call_runtime);
Node* needle = search_string.value();
GotoIf(TaggedIsSmi(needle), &call_runtime);
+
Node* instance_type = LoadInstanceType(receiver);
- GotoUnless(IsStringInstanceType(instance_type), &call_runtime);
+ GotoIfNot(IsStringInstanceType(instance_type), &call_runtime);
Node* needle_instance_type = LoadInstanceType(needle);
- GotoUnless(IsStringInstanceType(needle_instance_type), &call_runtime);
-
- // At this point we know that the receiver and the needle are Strings and
- // that position is a Smi.
-
- Node* needle_length = SmiUntag(LoadStringLength(needle));
- // Use possibly faster runtime fallback for long search strings.
- GotoIf(IntPtrLessThan(IntPtrConstant(1), needle_length),
- &call_runtime_unchecked);
- Node* string_length = SmiUntag(LoadStringLength(receiver));
- Node* start_position = SmiUntag(position.value());
-
- GotoIf(IntPtrEqual(IntPtrConstant(0), needle_length), &zero_length_needle);
- // Check that the needle fits in the start position.
- GotoUnless(IntPtrLessThanOrEqual(needle_length,
- IntPtrSub(string_length, start_position)),
- &return_minus_1);
- // Only support one-byte strings on the fast path.
- Label check_needle(this), continue_fast_path(this);
- BranchIfSimpleOneByteStringInstanceType(instance_type, &check_needle,
- &call_runtime_unchecked);
- Bind(&check_needle);
- BranchIfSimpleOneByteStringInstanceType(
- needle_instance_type, &continue_fast_path, &call_runtime_unchecked);
- Bind(&continue_fast_path);
- {
- Node* needle_byte =
- ChangeInt32ToIntPtr(LoadOneByteChar(needle, IntPtrConstant(0)));
- Node* start_address = OneByteCharAddress(receiver, start_position);
- Node* search_length = IntPtrSub(string_length, start_position);
- // Call out to the highly optimized memchr to perform the actual byte
- // search.
- Node* memchr =
- ExternalConstant(ExternalReference::libc_memchr_function(isolate()));
- Node* result_address =
- CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
- MachineType::IntPtr(), MachineType::UintPtr(), memchr,
- start_address, needle_byte, search_length);
- GotoIf(WordEqual(result_address, IntPtrConstant(0)), &return_minus_1);
- Node* result_index =
- IntPtrAdd(IntPtrSub(result_address, start_address), start_position);
- arguments.PopAndReturn(SmiTag(result_index));
- }
- Bind(&zero_length_needle);
- {
- Comment("0-length needle");
- arguments.PopAndReturn(SmiTag(IntPtrMin(string_length, start_position)));
- }
- }
+ GotoIfNot(IsStringInstanceType(needle_instance_type), &call_runtime);
- Bind(&return_minus_1);
- { arguments.PopAndReturn(SmiConstant(-1)); }
+ StringIndexOf(
+ receiver, instance_type, needle, needle_instance_type, position.value(),
+ [&arguments](Node* result) { arguments.PopAndReturn(result); });
+ }
Bind(&call_runtime);
{
@@ -878,17 +1074,6 @@ TF_BUILTIN(StringPrototypeIndexOf, StringBuiltinsAssembler) {
search_string.value(), position.value());
arguments.PopAndReturn(result);
}
-
- Bind(&call_runtime_unchecked);
- {
- // Simplified version of the runtime call where the types of the arguments
- // are already known due to type checks in this stub.
- Comment("Call Runtime Unchecked");
- Node* result =
- CallRuntime(Runtime::kStringIndexOfUnchecked, context, receiver,
- search_string.value(), position.value());
- arguments.PopAndReturn(result);
- }
}
// ES6 section 21.1.3.9
@@ -985,6 +1170,358 @@ BUILTIN(StringPrototypeNormalize) {
return *string;
}
+compiler::Node* StringBuiltinsAssembler::IsNullOrUndefined(Node* const value) {
+ return Word32Or(IsUndefined(value), IsNull(value));
+}
+
+void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
+ Node* const value,
+ const char* method_name) {
+ Label out(this), throw_exception(this, Label::kDeferred);
+ Branch(IsNullOrUndefined(value), &throw_exception, &out);
+
+ Bind(&throw_exception);
+ TailCallRuntime(
+ Runtime::kThrowCalledOnNullOrUndefined, context,
+ HeapConstant(factory()->NewStringFromAsciiChecked(method_name, TENURED)));
+
+ Bind(&out);
+}
+
+void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
+ Node* const context, Node* const object, Handle<Symbol> symbol,
+ const NodeFunction0& regexp_call, const NodeFunction1& generic_call) {
+ Label out(this);
+
+ // Smis definitely don't have an attached symbol.
+ GotoIf(TaggedIsSmi(object), &out);
+
+ Node* const object_map = LoadMap(object);
+
+ // Skip the slow lookup for Strings.
+ {
+ Label next(this);
+
+ GotoIfNot(IsStringInstanceType(LoadMapInstanceType(object_map)), &next);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const initial_proto_initial_map = LoadContextElement(
+ native_context, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX);
+
+ Node* const string_fun =
+ LoadContextElement(native_context, Context::STRING_FUNCTION_INDEX);
+ Node* const initial_map =
+ LoadObjectField(string_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const proto_map = LoadMap(LoadMapPrototype(initial_map));
+
+ Branch(WordEqual(proto_map, initial_proto_initial_map), &out, &next);
+
+ Bind(&next);
+ }
+
+ // Take the fast path for RegExps.
+ {
+ Label stub_call(this), slow_lookup(this);
+
+ RegExpBuiltinsAssembler regexp_asm(state());
+ regexp_asm.BranchIfFastRegExp(context, object, object_map, &stub_call,
+ &slow_lookup);
+
+ Bind(&stub_call);
+ Return(regexp_call());
+
+ Bind(&slow_lookup);
+ }
+
+ GotoIf(IsNullOrUndefined(object), &out);
+
+ // Fall back to a slow lookup of {object[symbol]}.
+
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+ Node* const key = HeapConstant(symbol);
+ Node* const maybe_func = CallStub(getproperty_callable, context, object, key);
+
+ GotoIf(IsUndefined(maybe_func), &out);
+
+ // Attempt to call the function.
+
+ Node* const result = generic_call(maybe_func);
+ Return(result);
+
+ Bind(&out);
+}
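
MaybeCallFunctionAtSymbol implements the delegation protocol sketched in its declaration comment: if the receiver is not null/undefined and carries a callable at the symbol, call it; otherwise fall through to the default behavior. A toy model in standalone C++, where a std::map plays the JS object and nullptr plays null/undefined; none of this is V8 API:

#include <functional>
#include <iostream>
#include <map>
#include <string>

using JSObject = std::map<std::string, std::function<std::string()>>;

std::string CallAtSymbolOrDefault(
    const JSObject* object, const std::string& symbol,
    const std::function<std::string()>& fallback) {
  if (object != nullptr) {                         // !IS_NULL_OR_UNDEFINED
    auto it = object->find(symbol);
    if (it != object->end()) return it->second();  // maybe_function defined
  }
  return fallback();                               // default String behavior
}

int main() {
  JSObject regexp_like{
      {"Symbol.replace", []() -> std::string { return "via @@replace"; }}};
  std::cout << CallAtSymbolOrDefault(&regexp_like, "Symbol.replace",
                                     []() -> std::string { return "default"; })
            << "\n";  // prints "via @@replace"
  std::cout << CallAtSymbolOrDefault(nullptr, "Symbol.replace",
                                     []() -> std::string { return "default"; })
            << "\n";  // prints "default"
}
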
+
+// ES6 section 21.1.3.16 String.prototype.replace ( search, replace )
+TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
+ Label out(this);
+
+ Node* const receiver = Parameter(0);
+ Node* const search = Parameter(1);
+ Node* const replace = Parameter(2);
+ Node* const context = Parameter(5);
+
+ Node* const smi_zero = SmiConstant(0);
+
+ RequireObjectCoercible(context, receiver, "String.prototype.replace");
+
+ // Redirect to replacer method if {search[@@replace]} is not undefined.
+
+ MaybeCallFunctionAtSymbol(
+ context, search, isolate()->factory()->replace_symbol(),
+ [=]() {
+ Callable tostring_callable = CodeFactory::ToString(isolate());
+ Node* const subject_string =
+ CallStub(tostring_callable, context, receiver);
+
+ Callable replace_callable = CodeFactory::RegExpReplace(isolate());
+ return CallStub(replace_callable, context, search, subject_string,
+ replace);
+ },
+ [=](Node* fn) {
+ Callable call_callable = CodeFactory::Call(isolate());
+ return CallJS(call_callable, context, fn, search, receiver, replace);
+ });
+
+ // Convert {receiver} and {search} to strings.
+
+ Callable tostring_callable = CodeFactory::ToString(isolate());
+ Callable indexof_callable = CodeFactory::StringIndexOf(isolate());
+
+ Node* const subject_string = CallStub(tostring_callable, context, receiver);
+ Node* const search_string = CallStub(tostring_callable, context, search);
+
+ Node* const subject_length = LoadStringLength(subject_string);
+ Node* const search_length = LoadStringLength(search_string);
+
+ // Fast-path single-char {search}, long {receiver}, and simple string
+ // {replace}.
+ {
+ Label next(this);
+
+ GotoIfNot(SmiEqual(search_length, SmiConstant(1)), &next);
+ GotoIfNot(SmiGreaterThan(subject_length, SmiConstant(0xFF)), &next);
+ GotoIf(TaggedIsSmi(replace), &next);
+ GotoIfNot(IsString(replace), &next);
+
+ Node* const dollar_string = HeapConstant(
+ isolate()->factory()->LookupSingleCharacterStringFromCode('$'));
+ Node* const dollar_ix =
+ CallStub(indexof_callable, context, replace, dollar_string, smi_zero);
+ GotoIfNot(SmiIsNegative(dollar_ix), &next);
+
+    // Searching by traversing a cons string tree and replacing with a cons
+    // of slices only works when a single-character search string is being
+    // replaced by a simple string, and it only pays off for long subjects.
+ // TODO(jgruber): Reevaluate if this is still beneficial.
+ // TODO(jgruber): TailCallRuntime when it correctly handles adapter frames.
+ Return(CallRuntime(Runtime::kStringReplaceOneCharWithString, context,
+ subject_string, search_string, replace));
+
+ Bind(&next);
+ }
+
+ // TODO(jgruber): Extend StringIndexOf to handle two-byte strings and
+ // longer substrings - we can handle up to 8 chars (one-byte) / 4 chars
+ // (2-byte).
+
+ Node* const match_start_index = CallStub(
+ indexof_callable, context, subject_string, search_string, smi_zero);
+ CSA_ASSERT(this, TaggedIsSmi(match_start_index));
+
+ // Early exit if no match found.
+ {
+ Label next(this), return_subject(this);
+
+ GotoIfNot(SmiIsNegative(match_start_index), &next);
+
+    // The spec requires performing ToString(replace) if {replace} is not
+    // callable, even though we are about to exit here. Since applying
+    // ToString() to a Smi has no observable side effects for numbers, we
+    // can skip it.
+ GotoIf(TaggedIsSmi(replace), &return_subject);
+ GotoIf(IsCallableMap(LoadMap(replace)), &return_subject);
+
+ // TODO(jgruber): Could introduce ToStringSideeffectsStub which only
+ // performs observable parts of ToString.
+ CallStub(tostring_callable, context, replace);
+ Goto(&return_subject);
+
+ Bind(&return_subject);
+ Return(subject_string);
+
+ Bind(&next);
+ }
+
+ Node* const match_end_index = SmiAdd(match_start_index, search_length);
+
+ Callable substring_callable = CodeFactory::SubString(isolate());
+ Callable stringadd_callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+
+ Variable var_result(this, MachineRepresentation::kTagged,
+ EmptyStringConstant());
+
+ // Compute the prefix.
+ {
+ Label next(this);
+
+ GotoIf(SmiEqual(match_start_index, smi_zero), &next);
+ Node* const prefix = CallStub(substring_callable, context, subject_string,
+ smi_zero, match_start_index);
+ var_result.Bind(prefix);
+
+ Goto(&next);
+ Bind(&next);
+ }
+
+ // Compute the string to replace with.
+
+ Label if_iscallablereplace(this), if_notcallablereplace(this);
+ GotoIf(TaggedIsSmi(replace), &if_notcallablereplace);
+ Branch(IsCallableMap(LoadMap(replace)), &if_iscallablereplace,
+ &if_notcallablereplace);
+
+ Bind(&if_iscallablereplace);
+ {
+ Callable call_callable = CodeFactory::Call(isolate());
+ Node* const replacement =
+ CallJS(call_callable, context, replace, UndefinedConstant(),
+ search_string, match_start_index, subject_string);
+ Node* const replacement_string =
+ CallStub(tostring_callable, context, replacement);
+ var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
+ replacement_string));
+ Goto(&out);
+ }
+
+ Bind(&if_notcallablereplace);
+ {
+ Node* const replace_string = CallStub(tostring_callable, context, replace);
+
+ // TODO(jgruber): Simplified GetSubstitution implementation in CSA.
+ Node* const matched = CallStub(substring_callable, context, subject_string,
+ match_start_index, match_end_index);
+ Node* const replacement_string =
+ CallRuntime(Runtime::kGetSubstitution, context, matched, subject_string,
+ match_start_index, replace_string);
+ var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
+ replacement_string));
+ Goto(&out);
+ }
+
+ Bind(&out);
+ {
+ Node* const suffix = CallStub(substring_callable, context, subject_string,
+ match_end_index, subject_length);
+ Node* const result =
+ CallStub(stringadd_callable, context, var_result.value(), suffix);
+ Return(result);
+ }
+}
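
Stripped of the @@replace dispatch, the callable-replace case, and $-substitution, the assembly step above is prefix + replacement + suffix. A hedged standalone sketch using std::string in place of the SubString/StringAdd stubs:

#include <cassert>
#include <string>

// Replace only the first occurrence, as the non-global path above does.
std::string ReplaceFirst(const std::string& subject,
                         const std::string& search,
                         const std::string& replacement) {
  size_t start = subject.find(search);             // StringIndexOf
  if (start == std::string::npos) return subject;  // early exit, no match
  size_t end = start + search.size();              // match_end_index
  return subject.substr(0, start)                  // prefix
       + replacement                               // replacement string
       + subject.substr(end);                      // suffix
}

int main() {
  assert(ReplaceFirst("a-b-c", "-", "+") == "a+b-c");  // first match only
  assert(ReplaceFirst("abc", "x", "+") == "abc");      // subject unchanged
}
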
+
+// ES6 section 21.1.3.19 String.prototype.split ( separator, limit )
+TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
+ Label out(this);
+
+ Node* const receiver = Parameter(0);
+ Node* const separator = Parameter(1);
+ Node* const limit = Parameter(2);
+ Node* const context = Parameter(5);
+
+ Node* const smi_zero = SmiConstant(0);
+
+ RequireObjectCoercible(context, receiver, "String.prototype.split");
+
+ // Redirect to splitter method if {separator[@@split]} is not undefined.
+
+ MaybeCallFunctionAtSymbol(
+ context, separator, isolate()->factory()->split_symbol(),
+ [=]() {
+ Callable tostring_callable = CodeFactory::ToString(isolate());
+ Node* const subject_string =
+ CallStub(tostring_callable, context, receiver);
+
+ Callable split_callable = CodeFactory::RegExpSplit(isolate());
+ return CallStub(split_callable, context, separator, subject_string,
+ limit);
+ },
+ [=](Node* fn) {
+ Callable call_callable = CodeFactory::Call(isolate());
+ return CallJS(call_callable, context, fn, separator, receiver, limit);
+ });
+
+ // String and integer conversions.
+ // TODO(jgruber): The old implementation used Uint32Max instead of SmiMax -
+ // but AFAIK there should not be a difference since arrays are capped at Smi
+ // lengths.
+
+ Callable tostring_callable = CodeFactory::ToString(isolate());
+ Node* const subject_string = CallStub(tostring_callable, context, receiver);
+ Node* const limit_number =
+ Select(IsUndefined(limit), [=]() { return SmiConstant(Smi::kMaxValue); },
+ [=]() { return ToUint32(context, limit); },
+ MachineRepresentation::kTagged);
+ Node* const separator_string =
+ CallStub(tostring_callable, context, separator);
+
+ // Shortcut for {limit} == 0.
+ {
+ Label next(this);
+ GotoIfNot(SmiEqual(limit_number, smi_zero), &next);
+
+ const ElementsKind kind = FAST_ELEMENTS;
+ Node* const native_context = LoadNativeContext(context);
+ Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+
+ Node* const length = smi_zero;
+ Node* const capacity = IntPtrConstant(0);
+ Node* const result = AllocateJSArray(kind, array_map, capacity, length);
+
+ Return(result);
+
+ Bind(&next);
+ }
+
+ // ECMA-262 says that if {separator} is undefined, the result should
+ // be an array of size 1 containing the entire string.
+ {
+ Label next(this);
+ GotoIfNot(IsUndefined(separator), &next);
+
+ const ElementsKind kind = FAST_ELEMENTS;
+ Node* const native_context = LoadNativeContext(context);
+ Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+
+ Node* const length = SmiConstant(1);
+ Node* const capacity = IntPtrConstant(1);
+ Node* const result = AllocateJSArray(kind, array_map, capacity, length);
+
+ Node* const fixed_array = LoadElements(result);
+ StoreFixedArrayElement(fixed_array, 0, subject_string);
+
+ Return(result);
+
+ Bind(&next);
+ }
+
+ // If the separator string is empty then return the elements in the subject.
+ {
+ Label next(this);
+ GotoIfNot(SmiEqual(LoadStringLength(separator_string), smi_zero), &next);
+
+ Node* const result = CallRuntime(Runtime::kStringToArray, context,
+ subject_string, limit_number);
+ Return(result);
+
+ Bind(&next);
+ }
+
+ Node* const result =
+ CallRuntime(Runtime::kStringSplit, context, subject_string,
+ separator_string, limit_number);
+ Return(result);
+}
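
The split builtin above tries three shortcuts before the runtime fallback: limit == 0, undefined separator, and empty separator. A standalone sketch of the first two plus the general loop (the empty-separator StringToArray case is omitted), with a null pointer standing in for undefined:

#include <cassert>
#include <string>
#include <vector>

std::vector<std::string> Split(const std::string& subject,
                               const std::string* separator,  // null = undefined
                               size_t limit) {
  if (limit == 0) return {};                    // {limit} == 0 shortcut
  if (separator == nullptr) return {subject};   // undefined separator shortcut
  std::vector<std::string> parts;
  size_t pos = 0, hit;
  while (parts.size() < limit &&
         (hit = subject.find(*separator, pos)) != std::string::npos) {
    parts.push_back(subject.substr(pos, hit - pos));
    pos = hit + separator->size();
  }
  // Append the tail only if the limit has not been reached.
  if (parts.size() < limit) parts.push_back(subject.substr(pos));
  return parts;
}

int main() {
  std::string comma = ",";
  assert(Split("a,b,c", &comma, 0).empty());      // limit 0 -> []
  assert(Split("a,b,c", nullptr, 9).size() == 1); // undefined -> [subject]
  assert(Split("a,b,c", &comma, 9).size() == 3);  // ["a","b","c"]
  assert(Split("a,b,c", &comma, 2).size() == 2);  // limit caps the result
}
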
+
// ES6 section B.2.3.1 String.prototype.substr ( start, length )
TF_BUILTIN(StringPrototypeSubstr, CodeStubAssembler) {
Label out(this), handle_length(this);
@@ -1067,7 +1604,7 @@ TF_BUILTIN(StringPrototypeSubstr, CodeStubAssembler) {
Node* const minimal_length = SmiSub(string_length, var_start.value());
var_length.Bind(SmiMin(positive_length, minimal_length));
- GotoUnless(SmiLessThanOrEqual(var_length.value(), zero), &out);
+ GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
Return(EmptyStringConstant());
}
@@ -1091,7 +1628,7 @@ TF_BUILTIN(StringPrototypeSubstr, CodeStubAssembler) {
Bind(&if_ispositive);
{
var_length.Bind(SmiSub(string_length, var_start.value()));
- GotoUnless(SmiLessThanOrEqual(var_length.value(), zero), &out);
+ GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
Return(EmptyStringConstant());
}
}
@@ -1331,7 +1868,7 @@ compiler::Node* StringBuiltinsAssembler::LoadSurrogatePairAt(
&return_result);
Node* next_index = SmiAdd(index, SmiConstant(Smi::FromInt(1)));
- GotoUnless(SmiLessThan(next_index, length), &return_result);
+ GotoIfNot(SmiLessThan(next_index, length), &return_result);
var_trail.Bind(StringCharCodeAt(string, next_index));
Branch(Word32Equal(Word32And(var_trail.value(), Int32Constant(0xFC00)),
Int32Constant(0xDC00)),
@@ -1393,9 +1930,9 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
Node* context = Parameter(3);
GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
- GotoUnless(Word32Equal(LoadInstanceType(iterator),
- Int32Constant(JS_STRING_ITERATOR_TYPE)),
- &throw_bad_receiver);
+ GotoIfNot(Word32Equal(LoadInstanceType(iterator),
+ Int32Constant(JS_STRING_ITERATOR_TYPE)),
+ &throw_bad_receiver);
Node* string = LoadObjectField(iterator, JSStringIterator::kStringOffset);
Node* position =
@@ -1438,13 +1975,204 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
Bind(&throw_bad_receiver);
{
     // The {receiver} is not a valid JSStringIterator.
- Node* result =
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(factory()->NewStringFromAsciiChecked(
- "String Iterator.prototype.next", TENURED)),
- iterator);
- Return(result); // Never reached.
+ CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+ HeapConstant(factory()->NewStringFromAsciiChecked(
+ "String Iterator.prototype.next", TENURED)),
+ iterator);
+ Unreachable();
+ }
+}
+
+namespace {
+
+inline bool ToUpperOverflows(uc32 character) {
+ // y with umlauts and the micro sign are the only characters that stop
+ // fitting into one-byte when converting to uppercase.
+ static const uc32 yuml_code = 0xff;
+ static const uc32 micro_code = 0xb5;
+ return (character == yuml_code || character == micro_code);
+}
+
+template <class Converter>
+MUST_USE_RESULT static Object* ConvertCaseHelper(
+ Isolate* isolate, String* string, SeqString* result, int result_length,
+ unibrow::Mapping<Converter, 128>* mapping) {
+ DisallowHeapAllocation no_gc;
+ // We try this twice, once with the assumption that the result is no longer
+ // than the input and, if that assumption breaks, again with the exact
+ // length. This may not be pretty, but it is nicer than what was here before
+ // and I hereby claim my vaffel-is.
+ //
+ // NOTE: This assumes that the upper/lower case of an ASCII
+ // character is also ASCII. This is currently the case, but it
+ // might break in the future if we implement more context and locale
+ // dependent upper/lower conversions.
+ bool has_changed_character = false;
+
+ // Convert all characters to upper case, assuming that they will fit
+ // in the buffer
+ StringCharacterStream stream(string);
+ unibrow::uchar chars[Converter::kMaxWidth];
+ // We can assume that the string is not empty
+ uc32 current = stream.GetNext();
+ bool ignore_overflow = Converter::kIsToLower || result->IsSeqTwoByteString();
+ for (int i = 0; i < result_length;) {
+ bool has_next = stream.HasMore();
+ uc32 next = has_next ? stream.GetNext() : 0;
+ int char_length = mapping->get(current, next, chars);
+ if (char_length == 0) {
+ // The case conversion of this character is the character itself.
+ result->Set(i, current);
+ i++;
+ } else if (char_length == 1 &&
+ (ignore_overflow || !ToUpperOverflows(current))) {
+ // Common case: converting the letter resulted in one character.
+ DCHECK(static_cast<uc32>(chars[0]) != current);
+ result->Set(i, chars[0]);
+ has_changed_character = true;
+ i++;
+ } else if (result_length == string->length()) {
+ bool overflows = ToUpperOverflows(current);
+ // We've assumed that the result would be as long as the
+ // input but here is a character that converts to several
+ // characters. No matter, we calculate the exact length
+ // of the result and try the whole thing again.
+ //
+ // Note that this leaves room for optimization. We could just
+ // memcpy what we already have to the result string. Also,
+ // the result string is the last object allocated we could
+ // "realloc" it and probably, in the vast majority of cases,
+ // extend the existing string to be able to hold the full
+ // result.
+ int next_length = 0;
+ if (has_next) {
+ next_length = mapping->get(next, 0, chars);
+ if (next_length == 0) next_length = 1;
+ }
+ int current_length = i + char_length + next_length;
+ while (stream.HasMore()) {
+ current = stream.GetNext();
+ overflows |= ToUpperOverflows(current);
+ // NOTE: we use 0 as the next character here because, while
+ // the next character may affect what a character converts to,
+        // it does not in any case affect the length of what it converts
+        // to.
+ int char_length = mapping->get(current, 0, chars);
+ if (char_length == 0) char_length = 1;
+ current_length += char_length;
+ if (current_length > String::kMaxLength) {
+ AllowHeapAllocation allocate_error_and_return;
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewInvalidStringLengthError());
+ }
+ }
+      // Try again with the real length. Return a negative length if we
+      // need to allocate a two-byte string for the to-uppercase result.
+ return (overflows && !ignore_overflow) ? Smi::FromInt(-current_length)
+ : Smi::FromInt(current_length);
+ } else {
+ for (int j = 0; j < char_length; j++) {
+ result->Set(i, chars[j]);
+ i++;
+ }
+ has_changed_character = true;
+ }
+ current = next;
+ }
+ if (has_changed_character) {
+ return result;
+ } else {
+ // If we didn't actually change anything in doing the conversion
+ // we simple return the result and let the converted string
+ // become garbage; there is no reason to keep two identical strings
+ // alive.
+ return string;
+ }
+}
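
ConvertCaseHelper above first assumes the output is exactly as long as the input and, when a character expands under case mapping, reports the exact length so the caller can retry with a correctly sized (possibly two-byte) buffer. A toy model of that two-pass control flow, with a made-up expanding character rather than real Unicode case mappings:

#include <cassert>
#include <string>

char ToUpperAscii(char c) {
  return ('a' <= c && c <= 'z') ? static_cast<char>(c - 32) : c;
}

// Toy rule: 'x' "expands" to two characters ("KS"), standing in for
// characters like U+00DF that grow under case mapping.
std::string ToUpperTwoPass(const std::string& in) {
  // Pass 1: optimistically assume the result is as long as the input.
  std::string out(in.size(), '\0');
  for (size_t i = 0; i < in.size(); ++i) {
    if (in[i] == 'x') {
      // Assumption broken: compute the exact length and start over, like
      // ConvertCaseHelper returning the needed length for the retry call.
      size_t exact = 0;
      for (char c : in) exact += (c == 'x') ? 2 : 1;
      std::string wide;
      wide.reserve(exact);
      for (char c : in) {
        if (c == 'x') wide += "KS";
        else wide += ToUpperAscii(c);
      }
      return wide;
    }
    out[i] = ToUpperAscii(in[i]);
  }
  return out;
}

int main() {
  assert(ToUpperTwoPass("abc") == "ABC");     // same-length fast pass suffices
  assert(ToUpperTwoPass("taxi") == "TAKSI");  // expansion triggers the retry
}
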
+
+template <class Converter>
+MUST_USE_RESULT static Object* ConvertCase(
+ Handle<String> s, Isolate* isolate,
+ unibrow::Mapping<Converter, 128>* mapping) {
+ s = String::Flatten(s);
+ int length = s->length();
+ // Assume that the string is not empty; we need this assumption later
+ if (length == 0) return *s;
+
+ // Simpler handling of ASCII strings.
+ //
+ // NOTE: This assumes that the upper/lower case of an ASCII
+ // character is also ASCII. This is currently the case, but it
+ // might break in the future if we implement more context and locale
+ // dependent upper/lower conversions.
+ if (s->IsOneByteRepresentationUnderneath()) {
+ // Same length as input.
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat_content = s->GetFlatContent();
+ DCHECK(flat_content.IsFlat());
+ bool has_changed_character = false;
+ int index_to_first_unprocessed = FastAsciiConvert<Converter::kIsToLower>(
+ reinterpret_cast<char*>(result->GetChars()),
+ reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
+ length, &has_changed_character);
+ // If not ASCII, we discard the result and take the 2 byte path.
+ if (index_to_first_unprocessed == length)
+ return has_changed_character ? *result : *s;
+ }
+
+ Handle<SeqString> result; // Same length as input.
+ if (s->IsOneByteRepresentation()) {
+ result = isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+ } else {
+ result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked();
+ }
+
+ Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
+ if (answer->IsException(isolate) || answer->IsString()) return answer;
+
+ DCHECK(answer->IsSmi());
+ length = Smi::cast(answer)->value();
+ if (s->IsOneByteRepresentation() && length > 0) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawOneByteString(length));
+ } else {
+ if (length < 0) length = -length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawTwoByteString(length));
}
+ return ConvertCaseHelper(isolate, *s, *result, length, mapping);
+}
+
+} // namespace
+
+BUILTIN(StringPrototypeToLocaleLowerCase) {
+ HandleScope scope(isolate);
+ TO_THIS_STRING(string, "String.prototype.toLocaleLowerCase");
+ return ConvertCase(string, isolate,
+ isolate->runtime_state()->to_lower_mapping());
+}
+
+BUILTIN(StringPrototypeToLocaleUpperCase) {
+ HandleScope scope(isolate);
+ TO_THIS_STRING(string, "String.prototype.toLocaleUpperCase");
+ return ConvertCase(string, isolate,
+ isolate->runtime_state()->to_upper_mapping());
+}
+
+BUILTIN(StringPrototypeToLowerCase) {
+ HandleScope scope(isolate);
+ TO_THIS_STRING(string, "String.prototype.toLowerCase");
+ return ConvertCase(string, isolate,
+ isolate->runtime_state()->to_lower_mapping());
+}
+
+BUILTIN(StringPrototypeToUpperCase) {
+ HandleScope scope(isolate);
+ TO_THIS_STRING(string, "String.prototype.toUpperCase");
+ return ConvertCase(string, isolate,
+ isolate->runtime_state()->to_upper_mapping());
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 6067edba6d..f57d0bffd9 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -5,6 +5,8 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index ab1ebbc69e..9a9ec59d17 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -5,10 +5,24 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
+class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ void GenerateTypedArrayPrototypeGetter(const char* method_name,
+ int object_offset);
+ template <IterationKind kIterationKind>
+ void GenerateTypedArrayPrototypeIterationMethod(const char* method_name);
+};
+
// -----------------------------------------------------------------------------
// ES6 section 22.2 TypedArray Objects
@@ -19,152 +33,204 @@ BUILTIN(TypedArrayPrototypeBuffer) {
return *typed_array->GetBuffer();
}
-namespace {
-
-void Generate_TypedArrayPrototypeGetter(compiler::CodeAssemblerState* state,
- const char* method_name,
- int object_offset) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- CodeStubAssembler assembler(state);
-
- Node* receiver = assembler.Parameter(0);
- Node* context = assembler.Parameter(3);
+void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
+ const char* method_name, int object_offset) {
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
// Check if the {receiver} is actually a JSTypedArray.
- Label if_receiverisincompatible(&assembler, Label::kDeferred);
- assembler.GotoIf(assembler.TaggedIsSmi(receiver), &if_receiverisincompatible);
- Node* receiver_instance_type = assembler.LoadInstanceType(receiver);
- assembler.GotoUnless(
- assembler.Word32Equal(receiver_instance_type,
- assembler.Int32Constant(JS_TYPED_ARRAY_TYPE)),
- &if_receiverisincompatible);
+ Label receiver_is_incompatible(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(receiver), &receiver_is_incompatible);
+ GotoIfNot(HasInstanceType(receiver, JS_TYPED_ARRAY_TYPE),
+ &receiver_is_incompatible);
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
- assembler.LoadObjectField(receiver, JSTypedArray::kBufferOffset);
- Label if_receiverisneutered(&assembler, Label::kDeferred);
- assembler.GotoIf(assembler.IsDetachedBuffer(receiver_buffer),
- &if_receiverisneutered);
- assembler.Return(assembler.LoadObjectField(receiver, object_offset));
+ LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+ Label if_receiverisneutered(this, Label::kDeferred);
+ GotoIf(IsDetachedBuffer(receiver_buffer), &if_receiverisneutered);
+ Return(LoadObjectField(receiver, object_offset));
- assembler.Bind(&if_receiverisneutered);
+ Bind(&if_receiverisneutered);
{
// The {receiver}s buffer was neutered, default to zero.
- assembler.Return(assembler.SmiConstant(0));
+ Return(SmiConstant(0));
}
- assembler.Bind(&if_receiverisincompatible);
+ Bind(&receiver_is_incompatible);
{
- // The {receiver} is not a valid JSGeneratorObject.
- Node* result = assembler.CallRuntime(
- Runtime::kThrowIncompatibleMethodReceiver, context,
- assembler.HeapConstant(assembler.factory()->NewStringFromAsciiChecked(
- method_name, TENURED)),
- receiver);
- assembler.Return(result); // Never reached.
+ // The {receiver} is not a valid JSTypedArray.
+ CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+ HeapConstant(
+ factory()->NewStringFromAsciiChecked(method_name, TENURED)),
+ receiver);
+ Unreachable();
}
}
-} // namespace
-
// ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength
-void Builtins::Generate_TypedArrayPrototypeByteLength(
- compiler::CodeAssemblerState* state) {
- Generate_TypedArrayPrototypeGetter(state,
- "get TypedArray.prototype.byteLength",
- JSTypedArray::kByteLengthOffset);
+TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
+ GenerateTypedArrayPrototypeGetter("get TypedArray.prototype.byteLength",
+ JSTypedArray::kByteLengthOffset);
}
// ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset
-void Builtins::Generate_TypedArrayPrototypeByteOffset(
- compiler::CodeAssemblerState* state) {
- Generate_TypedArrayPrototypeGetter(state,
- "get TypedArray.prototype.byteOffset",
- JSTypedArray::kByteOffsetOffset);
+TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
+ GenerateTypedArrayPrototypeGetter("get TypedArray.prototype.byteOffset",
+ JSTypedArray::kByteOffsetOffset);
}
// ES6 section 22.2.3.18 get %TypedArray%.prototype.length
-void Builtins::Generate_TypedArrayPrototypeLength(
- compiler::CodeAssemblerState* state) {
- Generate_TypedArrayPrototypeGetter(state, "get TypedArray.prototype.length",
- JSTypedArray::kLengthOffset);
+TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
+ GenerateTypedArrayPrototypeGetter("get TypedArray.prototype.length",
+ JSTypedArray::kLengthOffset);
}
-namespace {
-
template <IterationKind kIterationKind>
-void Generate_TypedArrayPrototypeIterationMethod(
- compiler::CodeAssemblerState* state, const char* method_name) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
- CodeStubAssembler assembler(state);
-
- Node* receiver = assembler.Parameter(0);
- Node* context = assembler.Parameter(3);
+void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
+ const char* method_name) {
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
- Label throw_bad_receiver(&assembler, Label::kDeferred);
- Label throw_typeerror(&assembler, Label::kDeferred);
+ Label throw_bad_receiver(this, Label::kDeferred);
+ Label throw_typeerror(this, Label::kDeferred);
- assembler.GotoIf(assembler.TaggedIsSmi(receiver), &throw_bad_receiver);
+ GotoIf(TaggedIsSmi(receiver), &throw_bad_receiver);
- Node* map = assembler.LoadMap(receiver);
- Node* instance_type = assembler.LoadMapInstanceType(map);
- assembler.GotoIf(
- assembler.Word32NotEqual(instance_type,
- assembler.Int32Constant(JS_TYPED_ARRAY_TYPE)),
- &throw_bad_receiver);
+ Node* map = LoadMap(receiver);
+ Node* instance_type = LoadMapInstanceType(map);
+ GotoIf(Word32NotEqual(instance_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ &throw_bad_receiver);
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
- assembler.LoadObjectField(receiver, JSTypedArray::kBufferOffset);
- Label if_receiverisneutered(&assembler, Label::kDeferred);
- assembler.GotoIf(assembler.IsDetachedBuffer(receiver_buffer),
- &if_receiverisneutered);
+ LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+ Label if_receiverisneutered(this, Label::kDeferred);
+ GotoIf(IsDetachedBuffer(receiver_buffer), &if_receiverisneutered);
- assembler.Return(assembler.CreateArrayIterator(receiver, map, instance_type,
- context, kIterationKind));
+ Return(CreateArrayIterator(receiver, map, instance_type, context,
+ kIterationKind));
- Variable var_message(&assembler, MachineRepresentation::kTagged);
- assembler.Bind(&throw_bad_receiver);
- var_message.Bind(
- assembler.SmiConstant(Smi::FromInt(MessageTemplate::kNotTypedArray)));
- assembler.Goto(&throw_typeerror);
+ Variable var_message(this, MachineRepresentation::kTagged);
+ Bind(&throw_bad_receiver);
+ var_message.Bind(SmiConstant(MessageTemplate::kNotTypedArray));
+ Goto(&throw_typeerror);
- assembler.Bind(&if_receiverisneutered);
+ Bind(&if_receiverisneutered);
var_message.Bind(
- assembler.SmiConstant(Smi::FromInt(MessageTemplate::kDetachedOperation)));
- assembler.Goto(&throw_typeerror);
+ SmiConstant(Smi::FromInt(MessageTemplate::kDetachedOperation)));
+ Goto(&throw_typeerror);
- assembler.Bind(&throw_typeerror);
+ Bind(&throw_typeerror);
{
- Node* arg1 = assembler.HeapConstant(
- assembler.isolate()->factory()->NewStringFromAsciiChecked(method_name,
- TENURED));
- Node* result = assembler.CallRuntime(Runtime::kThrowTypeError, context,
- var_message.value(), arg1);
- assembler.Return(result);
+ Node* method_arg = HeapConstant(
+ isolate()->factory()->NewStringFromAsciiChecked(method_name, TENURED));
+ Node* result = CallRuntime(Runtime::kThrowTypeError, context,
+ var_message.value(), method_arg);
+ Return(result);
}
}
-} // namespace
-void Builtins::Generate_TypedArrayPrototypeValues(
- compiler::CodeAssemblerState* state) {
- Generate_TypedArrayPrototypeIterationMethod<IterationKind::kValues>(
- state, "%TypedArray%.prototype.values()");
+TF_BUILTIN(TypedArrayPrototypeValues, TypedArrayBuiltinsAssembler) {
+ GenerateTypedArrayPrototypeIterationMethod<IterationKind::kValues>(
+ "%TypedArray%.prototype.values()");
}
-void Builtins::Generate_TypedArrayPrototypeEntries(
- compiler::CodeAssemblerState* state) {
- Generate_TypedArrayPrototypeIterationMethod<IterationKind::kEntries>(
- state, "%TypedArray%.prototype.entries()");
+TF_BUILTIN(TypedArrayPrototypeEntries, TypedArrayBuiltinsAssembler) {
+ GenerateTypedArrayPrototypeIterationMethod<IterationKind::kEntries>(
+ "%TypedArray%.prototype.entries()");
}
-void Builtins::Generate_TypedArrayPrototypeKeys(
- compiler::CodeAssemblerState* state) {
- Generate_TypedArrayPrototypeIterationMethod<IterationKind::kKeys>(
- state, "%TypedArray%.prototype.keys()");
+TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) {
+ GenerateTypedArrayPrototypeIterationMethod<IterationKind::kKeys>(
+ "%TypedArray%.prototype.keys()");
+}
+
+namespace {
+
+int64_t CapRelativeIndex(Handle<Object> num, int64_t minimum, int64_t maximum) {
+ int64_t relative;
+ if (V8_LIKELY(num->IsSmi())) {
+ relative = Smi::cast(*num)->value();
+ } else {
+ DCHECK(num->IsHeapNumber());
+ double fp = HeapNumber::cast(*num)->value();
+ if (V8_UNLIKELY(!std::isfinite(fp))) {
+ // +Infinity / -Infinity
+ DCHECK(!std::isnan(fp));
+ return fp < 0 ? minimum : maximum;
+ }
+ relative = static_cast<int64_t>(fp);
+ }
+ return relative < 0 ? std::max<int64_t>(relative + maximum, minimum)
+ : std::min<int64_t>(relative, maximum);
+}
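
Stripped of the Smi/HeapNumber tagging and the ±Infinity special case, CapRelativeIndex above is the usual relative-index clamp used by copyWithin and friends: negatives count from the end, and everything is clamped into [minimum, maximum]. A standalone restatement with example values:

#include <algorithm>
#include <cassert>
#include <cstdint>

int64_t CapRelativeIndex(int64_t relative, int64_t minimum, int64_t maximum) {
  return relative < 0 ? std::max<int64_t>(relative + maximum, minimum)
                      : std::min<int64_t>(relative, maximum);
}

int main() {
  const int64_t len = 10;
  assert(CapRelativeIndex(3, 0, len) == 3);    // in range, unchanged
  assert(CapRelativeIndex(-2, 0, len) == 8);   // -2 means len - 2
  assert(CapRelativeIndex(-42, 0, len) == 0);  // clamped to minimum
  assert(CapRelativeIndex(99, 0, len) == 10);  // clamped to maximum
}
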
+
+} // namespace
+
+BUILTIN(TypedArrayPrototypeCopyWithin) {
+ HandleScope scope(isolate);
+
+ Handle<JSTypedArray> array;
+ const char* method = "%TypedArray%.prototype.copyWithin";
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+
+ if (V8_UNLIKELY(array->WasNeutered())) return *array;
+
+ int64_t len = array->length_value();
+ int64_t to = 0;
+ int64_t from = 0;
+ int64_t final = len;
+
+ if (V8_LIKELY(args.length() > 1)) {
+ Handle<Object> num;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, num, Object::ToInteger(isolate, args.at<Object>(1)));
+ to = CapRelativeIndex(num, 0, len);
+
+ if (args.length() > 2) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, num, Object::ToInteger(isolate, args.at<Object>(2)));
+ from = CapRelativeIndex(num, 0, len);
+
+ Handle<Object> end = args.atOrUndefined(isolate, 3);
+ if (!end->IsUndefined(isolate)) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, num,
+ Object::ToInteger(isolate, end));
+ final = CapRelativeIndex(num, 0, len);
+ }
+ }
+ }
+
+ int64_t count = std::min<int64_t>(final - from, len - to);
+ if (count <= 0) return *array;
+
+ // The TypedArray's buffer may have been transferred/detached during the
+ // parameter processing above. Return early in this case to prevent a
+ // potential use-after-free (UAF) error.
+ // TODO(caitp): throw here, as though the full algorithm were performed (the
+ // throw would have come from ecma262/#sec-integerindexedelementget)
+ // (see )
+ if (V8_UNLIKELY(array->WasNeutered())) return *array;
+
+ // Ensure the processed indices are within array bounds.
+ DCHECK_GE(from, 0);
+ DCHECK_LT(from, len);
+ DCHECK_GE(to, 0);
+ DCHECK_LT(to, len);
+ DCHECK_GE(len - count, 0);
+
+ Handle<FixedTypedArrayBase> elements(
+ FixedTypedArrayBase::cast(array->elements()));
+ size_t element_size = array->element_size();
+ to = to * element_size;
+ from = from * element_size;
+ count = count * element_size;
+
+ uint8_t* data = static_cast<uint8_t*>(elements->DataPtr());
+ std::memmove(data + to, data + from, count);
+
+ return *array;
}
} // namespace internal
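
The builtin above reduces copyWithin to a single memmove once the indices are clamped. A self-contained sketch of that byte-level step, assuming a raw buffer rather than a JSTypedArray (all names hypothetical):

#include <algorithm>
#include <cstdint>
#include <cstring>

// `to`, `from`, `final` are element indices already capped to [0, len].
// std::memmove (not memcpy) because the source and target ranges may overlap.
void CopyWithinSketch(uint8_t* data, int64_t len, size_t element_size,
                      int64_t to, int64_t from, int64_t final) {
  int64_t count = std::min<int64_t>(final - from, len - to);
  if (count <= 0) return;
  std::memmove(data + to * element_size, data + from * element_size,
               count * element_size);
}
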
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index be689ac038..7a2424deb6 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -8,6 +8,8 @@
#include "src/arguments.h"
#include "src/base/logging.h"
#include "src/builtins/builtins.h"
+#include "src/factory.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
@@ -121,13 +123,13 @@ class BuiltinArguments : public Arguments {
public: \
explicit Name##Assembler(compiler::CodeAssemblerState* state) \
: AssemblerBase(state) {} \
- void Generate##NameImpl(); \
+ void Generate##Name##Impl(); \
}; \
void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
Name##Assembler assembler(state); \
- assembler.Generate##NameImpl(); \
+ assembler.Generate##Name##Impl(); \
} \
- void Name##Assembler::Generate##NameImpl()
+ void Name##Assembler::Generate##Name##Impl()
// ----------------------------------------------------------------------------
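
The macro change above is a token-pasting fix: in the old form the `##` operator pastes `Generate` onto the literal token `NameImpl`, so the `Name` parameter is never expanded and every builtin's method comes out as `GenerateNameImpl`. Writing `Generate##Name##Impl` lets the parameter expand. A minimal illustration (hypothetical macros):

#define OLD(Name) void Generate##NameImpl();   // OLD(Add) -> GenerateNameImpl
#define NEW(Name) void Generate##Name##Impl(); // NEW(Add) -> GenerateAddImpl
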
diff --git a/deps/v8/src/builtins/builtins-wasm.cc b/deps/v8/src/builtins/builtins-wasm.cc
new file mode 100644
index 0000000000..c809cccba2
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-wasm.cc
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/code-stub-assembler.h"
+#include "src/objects-inl.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+
+TF_BUILTIN(WasmStackGuard, CodeStubAssembler) {
+ Node* context = SmiConstant(Smi::kZero);
+ TailCallRuntime(Runtime::kWasmStackGuard, context);
+}
+
+#define DECLARE_ENUM(name) \
+ TF_BUILTIN(ThrowWasm##name, CodeStubAssembler) { \
+ int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
+ TailCallRuntime(Runtime::kThrowWasmErrorFromTrapIf, \
+ SmiConstant(Smi::kZero), SmiConstant(message_id)); \
+ }
+FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
+#undef DECLARE_ENUM
+
+} // namespace internal
+} // namespace v8
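
builtins-wasm.cc relies on the X-macro pattern: FOREACH_WASM_TRAPREASON(DECLARE_ENUM) applies DECLARE_ENUM once per trap reason, stamping out one TF_BUILTIN each. A reduced sketch of the pattern with a hypothetical two-entry list:

// Hypothetical list in the style of FOREACH_WASM_TRAPREASON.
#define FOREACH_TRAP(V) V(Unreachable) V(DivByZero)

#define DECLARE_HANDLER(name) void ThrowWasm##name();
FOREACH_TRAP(DECLARE_HANDLER)  // ThrowWasmUnreachable(); ThrowWasmDivByZero();
#undef DECLARE_HANDLER
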
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 5997eb3550..518075a632 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -3,13 +3,14 @@
// found in the LICENSE file.
#include "src/builtins/builtins.h"
+#include "src/api.h"
#include "src/code-events.h"
#include "src/compiler/code-assembler.h"
#include "src/ic/ic-state.h"
#include "src/interface-descriptors.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -98,7 +99,8 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
CodeAssemblerGenerator generator,
CallDescriptors::Key interface_descriptor,
- Code::Flags flags, const char* name) {
+ Code::Flags flags, const char* name,
+ int result_size) {
HandleScope scope(isolate);
Zone zone(isolate->allocator(), ZONE_NAME);
// The interface descriptor with given key must be initialized at this point
@@ -106,7 +108,8 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
- compiler::CodeAssemblerState state(isolate, &zone, descriptor, flags, name);
+ compiler::CodeAssemblerState state(isolate, &zone, descriptor, flags, name,
+ result_size);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
@@ -136,11 +139,11 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
code = BuildWithCodeStubAssemblerJS(isolate, &Generate_##Name, Argc, \
kBuiltinFlags, #Name); \
builtins_[index++] = code;
-#define BUILD_TFS(Name, Kind, Extra, InterfaceDescriptor) \
+#define BUILD_TFS(Name, Kind, Extra, InterfaceDescriptor, result_size) \
{ InterfaceDescriptor##Descriptor descriptor(isolate); } \
code = BuildWithCodeStubAssemblerCS( \
isolate, &Generate_##Name, CallDescriptors::InterfaceDescriptor, \
- Code::ComputeFlags(Code::Kind, Extra), #Name); \
+ Code::ComputeFlags(Code::Kind, Extra), #Name, result_size); \
builtins_[index++] = code;
#define BUILD_ASM(Name) \
code = \
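
The new result_size argument flows from each BUILTIN_LIST entry through BUILD_TFS into the CodeAssemblerState, letting CodeStub-linkage builtins declare how many values they return (most return 1; ForInPrepare below returns 3). A sketch of the threading, with the expansion abbreviated:

// TFS(ForInPrepare, BUILTIN, kNoExtraICState, ForInPrepare, 3)
//   => BUILD_TFS(ForInPrepare, ..., /*result_size=*/3)
//   => BuildWithCodeStubAssemblerCS(isolate, &Generate_ForInPrepare, key,
//                                   flags, "ForInPrepare", 3)
//   => compiler::CodeAssemblerState state(..., name, result_size);
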
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index a21b272f20..f2b0c4f095 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -6,11 +6,15 @@
#define V8_BUILTINS_BUILTINS_H_
#include "src/base/flags.h"
-#include "src/handles.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
+template <typename T>
+class Handle;
+class Isolate;
+
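
Replacing the handles.h include with forward declarations works here because builtins.h only names Handle<Code> and Isolate* in declarations; code that actually uses the types still includes the full headers. A minimal illustration of the rule:

template <typename T>
class Handle;   // sufficient for `Handle<Code> Foo();` declarations
class Isolate;  // sufficient for `Isolate*` parameters
// Dereferencing or copying these types still requires the full definitions,
// so the .cc files keep their handles.h / isolate.h includes.
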
#define CODE_AGE_LIST_WITH_ARG(V, A) \
V(Quadragenarian, A) \
V(Quinquagenarian, A) \
@@ -38,7 +42,7 @@ namespace internal {
// TFJ: Builtin in TurboFan, with JS linkage (callable as a JavaScript function).
// Args: name, arguments count
// TFS: Builtin in TurboFan, with CodeStub linkage.
-// Args: name, code kind, extra IC state, interface descriptor
+// Args: name, code kind, extra IC state, interface descriptor, result_size
// ASM: Builtin in platform-dependent assembly.
// Args: name
// ASH: Handlers implemented in platform-dependent assembly.
@@ -52,8 +56,9 @@ namespace internal {
\
/* Declared first for dependency reasons */ \
ASM(CompileLazy) \
- TFS(ToObject, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(FastNewObject, BUILTIN, kNoExtraICState, FastNewObject) \
+ TFS(ToObject, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+ TFS(FastNewObject, BUILTIN, kNoExtraICState, FastNewObject, 1) \
+ TFS(HasProperty, BUILTIN, kNoExtraICState, HasProperty, 1) \
\
/* Calls */ \
ASM(ArgumentsAdaptorTrampoline) \
@@ -74,6 +79,9 @@ namespace internal {
ASM(TailCall_ReceiverIsNullOrUndefined) \
ASM(TailCall_ReceiverIsNotNullOrUndefined) \
ASM(TailCall_ReceiverIsAny) \
+ ASM(CallWithSpread) \
+ ASM(CallForwardVarargs) \
+ ASM(CallFunctionForwardVarargs) \
\
/* Construct */ \
/* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */ \
@@ -85,34 +93,38 @@ namespace internal {
ASM(ConstructProxy) \
/* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
ASM(Construct) \
+ ASM(ConstructWithSpread) \
ASM(JSConstructStubApi) \
ASM(JSConstructStubGeneric) \
ASM(JSBuiltinsConstructStub) \
ASM(JSBuiltinsConstructStubForDerived) \
- TFS(FastNewClosure, BUILTIN, kNoExtraICState, FastNewClosure) \
+ TFS(FastNewClosure, BUILTIN, kNoExtraICState, FastNewClosure, 1) \
TFS(FastNewFunctionContextEval, BUILTIN, kNoExtraICState, \
- FastNewFunctionContext) \
+ FastNewFunctionContext, 1) \
TFS(FastNewFunctionContextFunction, BUILTIN, kNoExtraICState, \
- FastNewFunctionContext) \
- TFS(FastCloneRegExp, BUILTIN, kNoExtraICState, FastCloneRegExp) \
+ FastNewFunctionContext, 1) \
+ TFS(FastNewStrictArguments, BUILTIN, kNoExtraICState, FastNewArguments, 1) \
+ TFS(FastNewSloppyArguments, BUILTIN, kNoExtraICState, FastNewArguments, 1) \
+ TFS(FastNewRestParameter, BUILTIN, kNoExtraICState, FastNewArguments, 1) \
+ TFS(FastCloneRegExp, BUILTIN, kNoExtraICState, FastCloneRegExp, 1) \
TFS(FastCloneShallowArrayTrack, BUILTIN, kNoExtraICState, \
- FastCloneShallowArray) \
+ FastCloneShallowArray, 1) \
TFS(FastCloneShallowArrayDontTrack, BUILTIN, kNoExtraICState, \
- FastCloneShallowArray) \
+ FastCloneShallowArray, 1) \
TFS(FastCloneShallowObject0, BUILTIN, kNoExtraICState, \
- FastCloneShallowObject) \
+ FastCloneShallowObject, 1) \
TFS(FastCloneShallowObject1, BUILTIN, kNoExtraICState, \
- FastCloneShallowObject) \
+ FastCloneShallowObject, 1) \
TFS(FastCloneShallowObject2, BUILTIN, kNoExtraICState, \
- FastCloneShallowObject) \
+ FastCloneShallowObject, 1) \
TFS(FastCloneShallowObject3, BUILTIN, kNoExtraICState, \
- FastCloneShallowObject) \
+ FastCloneShallowObject, 1) \
TFS(FastCloneShallowObject4, BUILTIN, kNoExtraICState, \
- FastCloneShallowObject) \
+ FastCloneShallowObject, 1) \
TFS(FastCloneShallowObject5, BUILTIN, kNoExtraICState, \
- FastCloneShallowObject) \
+ FastCloneShallowObject, 1) \
TFS(FastCloneShallowObject6, BUILTIN, kNoExtraICState, \
- FastCloneShallowObject) \
+ FastCloneShallowObject, 1) \
\
/* Apply and entries */ \
ASM(Apply) \
@@ -125,24 +137,27 @@ namespace internal {
ASM(StackCheck) \
\
/* String helpers */ \
- TFS(StringEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringNotEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringLessThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringLessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringGreaterThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringGreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringCharAt, BUILTIN, kNoExtraICState, StringCharAt) \
- TFS(StringCharCodeAt, BUILTIN, kNoExtraICState, StringCharCodeAt) \
+ TFS(StringCharAt, BUILTIN, kNoExtraICState, StringCharAt, 1) \
+ TFS(StringCharCodeAt, BUILTIN, kNoExtraICState, StringCharCodeAt, 1) \
+ TFS(StringEqual, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(StringGreaterThan, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(StringGreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(StringIndexOf, BUILTIN, kNoExtraICState, StringIndexOf, 1) \
+ TFS(StringLessThan, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(StringLessThanOrEqual, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(StringNotEqual, BUILTIN, kNoExtraICState, Compare, 1) \
\
/* Interpreter */ \
ASM(InterpreterEntryTrampoline) \
ASM(InterpreterPushArgsAndCall) \
ASM(InterpreterPushArgsAndCallFunction) \
+ ASM(InterpreterPushArgsAndCallWithFinalSpread) \
ASM(InterpreterPushArgsAndTailCall) \
ASM(InterpreterPushArgsAndTailCallFunction) \
ASM(InterpreterPushArgsAndConstruct) \
ASM(InterpreterPushArgsAndConstructFunction) \
ASM(InterpreterPushArgsAndConstructArray) \
+ ASM(InterpreterPushArgsAndConstructWithFinalSpread) \
ASM(InterpreterEnterBytecodeAdvance) \
ASM(InterpreterEnterBytecodeDispatch) \
ASM(InterpreterOnStackReplacement) \
@@ -175,62 +190,67 @@ namespace internal {
\
/* TurboFan support builtins */ \
TFS(CopyFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
- CopyFastSmiOrObjectElements) \
- TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements) \
+ CopyFastSmiOrObjectElements, 1) \
+ TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements, 1) \
TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
- GrowArrayElements) \
+ GrowArrayElements, 1) \
TFS(NewUnmappedArgumentsElements, BUILTIN, kNoExtraICState, \
- NewArgumentsElements) \
+ NewArgumentsElements, 1) \
TFS(NewRestParameterElements, BUILTIN, kNoExtraICState, \
- NewArgumentsElements) \
+ NewArgumentsElements, 1) \
\
/* Debugger */ \
- DBG(FrameDropper_LiveEdit) \
+ DBG(FrameDropperTrampoline) \
+ DBG(HandleDebuggerStatement) \
DBG(Return_DebugBreak) \
DBG(Slot_DebugBreak) \
\
/* Type conversions */ \
- TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(OrdinaryToPrimitive_Number, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(OrdinaryToPrimitive_String, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+ TFS(OrdinaryToPrimitive_Number, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+ TFS(OrdinaryToPrimitive_String, BUILTIN, kNoExtraICState, TypeConversion, 1) \
TFS(NonPrimitiveToPrimitive_Default, BUILTIN, kNoExtraICState, \
- TypeConversion) \
+ TypeConversion, 1) \
TFS(NonPrimitiveToPrimitive_Number, BUILTIN, kNoExtraICState, \
- TypeConversion) \
+ TypeConversion, 1) \
TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState, \
- TypeConversion) \
- TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToName, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToInteger, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToLength, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(Typeof, BUILTIN, kNoExtraICState, Typeof) \
- TFS(GetSuperConstructor, BUILTIN, kNoExtraICState, TypeConversion) \
+ TypeConversion, 1) \
+ TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+ TFS(ToName, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+ TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+ TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+ TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+ TFS(ToInteger, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+ TFS(ToLength, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+ TFS(ClassOf, BUILTIN, kNoExtraICState, Typeof, 1) \
+ TFS(Typeof, BUILTIN, kNoExtraICState, Typeof, 1) \
+ TFS(GetSuperConstructor, BUILTIN, kNoExtraICState, TypeConversion, 1) \
\
/* Handlers */ \
- TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState, \
- LoadWithVector) \
- TFS(KeyedLoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
- TFS(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC, LoadWithVector) \
- TFS(KeyedStoreIC_Megamorphic_TF, KEYED_STORE_IC, kNoExtraICState, \
- StoreWithVector) \
- TFS(KeyedStoreIC_Megamorphic_Strict_TF, KEYED_STORE_IC, \
- StoreICState::kStrictModeState, StoreWithVector) \
- ASM(KeyedStoreIC_Miss) \
- ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC) \
- TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector) \
- TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
- ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState) \
- TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
- TFS(LoadIC_Normal, HANDLER, Code::LOAD_IC, LoadWithVector) \
- TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector) \
- TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector) \
- TFS(StoreIC_Normal, HANDLER, Code::STORE_IC, StoreWithVector) \
- ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState) \
- TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector) \
- TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector) \
+ TFS(LoadICProtoArray, BUILTIN, kNoExtraICState, LoadICProtoArray, 1) \
+ TFS(LoadICProtoArrayThrowIfNonexistent, BUILTIN, kNoExtraICState, \
+ LoadICProtoArray, 1) \
+ TFS(KeyedLoadIC_Megamorphic, BUILTIN, kNoExtraICState, LoadWithVector, 1) \
+ TFS(KeyedLoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector, 1) \
+ TFS(KeyedLoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector, 1) \
+ TFS(KeyedLoadIC_IndexedString, HANDLER, Code::LOAD_IC, LoadWithVector, 1) \
+ TFS(KeyedStoreIC_Megamorphic, BUILTIN, kNoExtraICState, StoreWithVector, 1) \
+ TFS(KeyedStoreIC_Megamorphic_Strict, BUILTIN, kNoExtraICState, \
+ StoreWithVector, 1) \
+ TFS(KeyedStoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector, 1) \
+ TFS(KeyedStoreIC_Slow, HANDLER, Code::STORE_IC, StoreWithVector, 1) \
+ TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector, 1) \
+ TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector, \
+ 1) \
+ TFS(LoadField, BUILTIN, kNoExtraICState, LoadField, 1) \
+ TFS(LoadIC_FunctionPrototype, HANDLER, Code::LOAD_IC, LoadWithVector, 1) \
+ ASH(LoadIC_Getter_ForDeopt, BUILTIN, kNoExtraICState) \
+ TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector, 1) \
+ TFS(LoadIC_Normal, HANDLER, Code::LOAD_IC, LoadWithVector, 1) \
+ TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector, 1) \
+ TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector, 1) \
+ TFS(StoreIC_Normal, HANDLER, Code::STORE_IC, StoreWithVector, 1) \
+ ASH(StoreIC_Setter_ForDeopt, BUILTIN, kNoExtraICState) \
\
/* Built-in functions for JavaScript */ \
/* Special internal builtins */ \
@@ -257,6 +277,7 @@ namespace internal {
CPP(ArraySlice) \
CPP(ArraySplice) \
CPP(ArrayUnshift) \
+ TFJ(ArrayForEach, 2) \
/* ES6 #sec-array.prototype.entries */ \
TFJ(ArrayPrototypeEntries, 0) \
/* ES6 #sec-array.prototype.keys */ \
@@ -272,6 +293,14 @@ namespace internal {
CPP(ArrayBufferPrototypeGetByteLength) \
CPP(ArrayBufferIsView) \
\
+ /* AsyncFunction */ \
+ TFJ(AsyncFunctionAwaitCaught, 3) \
+ TFJ(AsyncFunctionAwaitUncaught, 3) \
+ TFJ(AsyncFunctionAwaitRejectClosure, 1) \
+ TFJ(AsyncFunctionAwaitResolveClosure, 1) \
+ TFJ(AsyncFunctionPromiseCreate, 0) \
+ TFJ(AsyncFunctionPromiseRelease, 1) \
+ \
/* Boolean */ \
CPP(BooleanConstructor) \
CPP(BooleanConstructor_ConstructStub) \
@@ -414,7 +443,7 @@ namespace internal {
\
/* Belongs to Objects but is a dependency of GeneratorPrototypeResume */ \
TFS(CreateIterResultObject, BUILTIN, kNoExtraICState, \
- CreateIterResultObject) \
+ CreateIterResultObject, 1) \
\
/* Generator and Async */ \
CPP(GeneratorFunctionConstructor) \
@@ -444,30 +473,24 @@ namespace internal {
CPP(JsonStringify) \
\
/* ICs */ \
- TFS(LoadIC, LOAD_IC, kNoExtraICState, LoadWithVector) \
- TFS(LoadICTrampoline, LOAD_IC, kNoExtraICState, Load) \
- TFS(KeyedLoadIC, KEYED_LOAD_IC, kNoExtraICState, LoadWithVector) \
- TFS(KeyedLoadICTrampoline, KEYED_LOAD_IC, kNoExtraICState, Load) \
- TFS(StoreIC, STORE_IC, kNoExtraICState, StoreWithVector) \
- TFS(StoreICTrampoline, STORE_IC, kNoExtraICState, Store) \
- TFS(StoreICStrict, STORE_IC, StoreICState::kStrictModeState, \
- StoreWithVector) \
- TFS(StoreICStrictTrampoline, STORE_IC, StoreICState::kStrictModeState, \
- Store) \
- TFS(KeyedStoreIC, KEYED_STORE_IC, kNoExtraICState, StoreWithVector) \
- TFS(KeyedStoreICTrampoline, KEYED_STORE_IC, kNoExtraICState, Store) \
- TFS(KeyedStoreICStrict, KEYED_STORE_IC, StoreICState::kStrictModeState, \
- StoreWithVector) \
- TFS(KeyedStoreICStrictTrampoline, KEYED_STORE_IC, \
- StoreICState::kStrictModeState, Store) \
- TFS(LoadGlobalIC, LOAD_GLOBAL_IC, LoadGlobalICState::kNotInsideTypeOfState, \
- LoadGlobalWithVector) \
- TFS(LoadGlobalICInsideTypeof, LOAD_GLOBAL_IC, \
- LoadGlobalICState::kInsideTypeOfState, LoadGlobalWithVector) \
- TFS(LoadGlobalICTrampoline, LOAD_GLOBAL_IC, \
- LoadGlobalICState::kNotInsideTypeOfState, LoadGlobal) \
- TFS(LoadGlobalICInsideTypeofTrampoline, LOAD_GLOBAL_IC, \
- LoadGlobalICState::kInsideTypeOfState, LoadGlobal) \
+ TFS(LoadIC, LOAD_IC, kNoExtraICState, LoadWithVector, 1) \
+ TFS(LoadICTrampoline, LOAD_IC, kNoExtraICState, Load, 1) \
+ TFS(KeyedLoadIC, KEYED_LOAD_IC, kNoExtraICState, LoadWithVector, 1) \
+ TFS(KeyedLoadICTrampoline, KEYED_LOAD_IC, kNoExtraICState, Load, 1) \
+ TFS(StoreIC, STORE_IC, kNoExtraICState, StoreWithVector, 1) \
+ TFS(StoreICTrampoline, STORE_IC, kNoExtraICState, Store, 1) \
+ TFS(StoreICStrict, STORE_IC, kNoExtraICState, StoreWithVector, 1) \
+ TFS(StoreICStrictTrampoline, STORE_IC, kNoExtraICState, Store, 1) \
+ TFS(KeyedStoreIC, KEYED_STORE_IC, kNoExtraICState, StoreWithVector, 1) \
+ TFS(KeyedStoreICTrampoline, KEYED_STORE_IC, kNoExtraICState, Store, 1) \
+ TFS(KeyedStoreICStrict, KEYED_STORE_IC, kNoExtraICState, StoreWithVector, 1) \
+ TFS(KeyedStoreICStrictTrampoline, KEYED_STORE_IC, kNoExtraICState, Store, 1) \
+ TFS(LoadGlobalIC, LOAD_GLOBAL_IC, kNoExtraICState, LoadGlobalWithVector, 1) \
+ TFS(LoadGlobalICInsideTypeof, LOAD_GLOBAL_IC, kNoExtraICState, \
+ LoadGlobalWithVector, 1) \
+ TFS(LoadGlobalICTrampoline, LOAD_GLOBAL_IC, kNoExtraICState, LoadGlobal, 1) \
+ TFS(LoadGlobalICInsideTypeofTrampoline, LOAD_GLOBAL_IC, kNoExtraICState, \
+ LoadGlobal, 1) \
\
/* Math */ \
/* ES6 section 20.2.2.1 Math.abs ( x ) */ \
@@ -565,25 +588,25 @@ namespace internal {
CPP(NumberPrototypeToString) \
/* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */ \
TFJ(NumberPrototypeValueOf, 0) \
- TFS(Add, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Subtract, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Multiply, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Divide, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Modulus, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(BitwiseAnd, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(BitwiseOr, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(BitwiseXor, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(ShiftLeft, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(ShiftRight, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(ShiftRightLogical, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(LessThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(LessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(GreaterThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(GreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(Equal, BUILTIN, kNoExtraICState, Compare) \
- TFS(NotEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StrictEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StrictNotEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(Add, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(Subtract, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(Multiply, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(Divide, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(Modulus, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(BitwiseAnd, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(BitwiseOr, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(BitwiseXor, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(ShiftLeft, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(ShiftRight, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(ShiftRightLogical, BUILTIN, kNoExtraICState, BinaryOp, 1) \
+ TFS(LessThan, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(LessThanOrEqual, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(GreaterThan, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(GreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(Equal, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(NotEqual, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(StrictEqual, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(StrictNotEqual, BUILTIN, kNoExtraICState, Compare, 1) \
\
/* Object */ \
CPP(ObjectAssign) \
@@ -618,10 +641,14 @@ namespace internal {
CPP(ObjectSeal) \
CPP(ObjectValues) \
\
- TFS(HasProperty, BUILTIN, kNoExtraICState, HasProperty) \
- TFS(InstanceOf, BUILTIN, kNoExtraICState, Compare) \
- TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare) \
- TFS(ForInFilter, BUILTIN, kNoExtraICState, ForInFilter) \
+ /* instanceof */ \
+ TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare, 1) \
+ TFS(InstanceOf, BUILTIN, kNoExtraICState, Compare, 1) \
+ \
+ /* for-in */ \
+ TFS(ForInFilter, BUILTIN, kNoExtraICState, ForInFilter, 1) \
+ TFS(ForInNext, BUILTIN, kNoExtraICState, ForInNext, 1) \
+ TFS(ForInPrepare, BUILTIN, kNoExtraICState, ForInPrepare, 3) \
\
/* Promise */ \
TFJ(PromiseGetCapabilitiesExecutor, 2) \
@@ -633,13 +660,17 @@ namespace internal {
TFJ(PromiseRejectClosure, 1) \
TFJ(PromiseThen, 2) \
TFJ(PromiseCatch, 1) \
- TFJ(PerformPromiseThen, 4) \
TFJ(ResolvePromise, 2) \
- TFS(PromiseHandleReject, BUILTIN, kNoExtraICState, PromiseHandleReject) \
+ TFS(PromiseHandleReject, BUILTIN, kNoExtraICState, PromiseHandleReject, 1) \
TFJ(PromiseHandle, 5) \
TFJ(PromiseResolve, 1) \
TFJ(PromiseReject, 1) \
TFJ(InternalPromiseReject, 3) \
+ TFJ(PromiseFinally, 1) \
+ TFJ(PromiseThenFinally, 1) \
+ TFJ(PromiseCatchFinally, 1) \
+ TFJ(PromiseValueThunkFinally, 0) \
+ TFJ(PromiseThrowerFinally, 0) \
\
/* Proxy */ \
CPP(ProxyConstructor) \
@@ -684,16 +715,20 @@ namespace internal {
TFJ(RegExpPrototypeIgnoreCaseGetter, 0) \
TFJ(RegExpPrototypeMatch, 1) \
TFJ(RegExpPrototypeMultilineGetter, 0) \
- TFJ(RegExpPrototypeReplace, 2) \
TFJ(RegExpPrototypeSearch, 1) \
TFJ(RegExpPrototypeSourceGetter, 0) \
- TFJ(RegExpPrototypeSplit, 2) \
TFJ(RegExpPrototypeStickyGetter, 0) \
TFJ(RegExpPrototypeTest, 1) \
CPP(RegExpPrototypeToString) \
TFJ(RegExpPrototypeUnicodeGetter, 0) \
CPP(RegExpRightContextGetter) \
\
+ TFS(RegExpReplace, BUILTIN, kNoExtraICState, RegExpReplace, 1) \
+ TFJ(RegExpPrototypeReplace, 2) \
+ \
+ TFS(RegExpSplit, BUILTIN, kNoExtraICState, RegExpSplit, 1) \
+ TFJ(RegExpPrototypeSplit, 2) \
+ \
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
TFJ(AtomicsLoad, 2) \
@@ -725,6 +760,10 @@ namespace internal {
CPP(StringPrototypeLocaleCompare) \
/* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */ \
CPP(StringPrototypeNormalize) \
+ /* ES6 section 21.1.3.16 String.prototype.replace ( search, replace ) */ \
+ TFJ(StringPrototypeReplace, 2) \
+ /* ES6 section 21.1.3.19 String.prototype.split ( separator, limit ) */ \
+ TFJ(StringPrototypeSplit, 2) \
/* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */ \
TFJ(StringPrototypeSubstr, 2) \
/* ES6 section 21.1.3.19 String.prototype.substring ( start, end ) */ \
@@ -734,6 +773,14 @@ namespace internal {
CPP(StringPrototypeStartsWith) \
/* ES6 section 21.1.3.25 String.prototype.toString () */ \
TFJ(StringPrototypeToString, 0) \
+ /* ES #sec-string.prototype.tolocalelowercase */ \
+ CPP(StringPrototypeToLocaleLowerCase) \
+ /* ES #sec-string.prototype.tolocaleuppercase */ \
+ CPP(StringPrototypeToLocaleUpperCase) \
+ /* ES #sec-string.prototype.tolowercase */ \
+ CPP(StringPrototypeToLowerCase) \
+ /* ES #sec-string.prototype.touppercase */ \
+ CPP(StringPrototypeToUpperCase) \
CPP(StringPrototypeTrim) \
CPP(StringPrototypeTrimLeft) \
CPP(StringPrototypeTrimRight) \
@@ -772,7 +819,35 @@ namespace internal {
/* ES6 #sec-%typedarray%.prototype.keys */ \
TFJ(TypedArrayPrototypeKeys, 0) \
/* ES6 #sec-%typedarray%.prototype.values */ \
- TFJ(TypedArrayPrototypeValues, 0)
+ TFJ(TypedArrayPrototypeValues, 0) \
+ /* ES6 #sec-%typedarray%.prototype.copywithin */ \
+ CPP(TypedArrayPrototypeCopyWithin) \
+ \
+ /* Wasm */ \
+ TFS(WasmStackGuard, BUILTIN, kNoExtraICState, WasmRuntimeCall, 1) \
+ TFS(ThrowWasmTrapUnreachable, BUILTIN, kNoExtraICState, WasmRuntimeCall, 1) \
+ TFS(ThrowWasmTrapMemOutOfBounds, BUILTIN, kNoExtraICState, WasmRuntimeCall, \
+ 1) \
+ TFS(ThrowWasmTrapDivByZero, BUILTIN, kNoExtraICState, WasmRuntimeCall, 1) \
+ TFS(ThrowWasmTrapDivUnrepresentable, BUILTIN, kNoExtraICState, \
+ WasmRuntimeCall, 1) \
+ TFS(ThrowWasmTrapRemByZero, BUILTIN, kNoExtraICState, WasmRuntimeCall, 1) \
+ TFS(ThrowWasmTrapFloatUnrepresentable, BUILTIN, kNoExtraICState, \
+ WasmRuntimeCall, 1) \
+ TFS(ThrowWasmTrapFuncInvalid, BUILTIN, kNoExtraICState, WasmRuntimeCall, 1) \
+ TFS(ThrowWasmTrapFuncSigMismatch, BUILTIN, kNoExtraICState, WasmRuntimeCall, \
+ 1) \
+ \
+ /* Async-from-Sync Iterator */ \
+ \
+ /* %AsyncFromSyncIteratorPrototype% */ \
+ /* (proposal-async-iteration/#sec-%asyncfromsynciteratorprototype%-object)*/ \
+ TFJ(AsyncFromSyncIteratorPrototypeNext, 1) \
+ TFJ(AsyncFromSyncIteratorPrototypeThrow, 1) \
+ TFJ(AsyncFromSyncIteratorPrototypeReturn, 1) \
+ \
+ /* proposal-async-iteration/#sec-async-iterator-value-unwrap-functions */ \
+ TFJ(AsyncIteratorValueUnwrap, 1)
#define IGNORE_BUILTIN(...)
@@ -792,6 +867,7 @@ namespace internal {
// Forward declarations.
class ObjectVisitor;
+enum class InterpreterPushArgsMode : unsigned;
namespace compiler {
class CodeAssemblerState;
}
@@ -811,7 +887,7 @@ class Builtins {
// Disassembler support.
const char* Lookup(byte* pc);
- enum Name {
+ enum Name : int32_t {
#define DEF_ENUM(Name, ...) k##Name,
BUILTIN_LIST_ALL(DEF_ENUM)
#undef DEF_ENUM
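
Pinning the underlying type of Builtins::Name to int32_t makes the enum's size and representation well-defined wherever builtin indices are stored or compared as raw 32-bit values. A compile-time check of what the declaration guarantees:

#include <cstdint>
#include <type_traits>

enum Name : int32_t { kFirst, kSecond };
static_assert(sizeof(Name) == sizeof(int32_t), "fixed-width enum");
static_assert(std::is_same<std::underlying_type<Name>::type, int32_t>::value,
              "underlying type is int32_t");
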
@@ -833,10 +909,9 @@ class Builtins {
Handle<Code> NonPrimitiveToPrimitive(
ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
- Handle<Code> InterpreterPushArgsAndCall(
- TailCallMode tail_call_mode,
- CallableType function_type = CallableType::kAny);
- Handle<Code> InterpreterPushArgsAndConstruct(CallableType function_type);
+ Handle<Code> InterpreterPushArgsAndCall(TailCallMode tail_call_mode,
+ InterpreterPushArgsMode mode);
+ Handle<Code> InterpreterPushArgsAndConstruct(InterpreterPushArgsMode mode);
Handle<Code> NewFunctionContext(ScopeType scope_type);
Handle<Code> NewCloneShallowArray(AllocationSiteMode allocation_mode);
Handle<Code> NewCloneShallowObject(int length);
@@ -888,13 +963,15 @@ class Builtins {
static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode);
+ static void Generate_CallForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code);
static void Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
- CallableType function_type);
+ InterpreterPushArgsMode mode);
static void Generate_InterpreterPushArgsAndConstructImpl(
- MacroAssembler* masm, CallableType function_type);
+ MacroAssembler* masm, InterpreterPushArgsMode mode);
enum class MathMaxMinKind { kMax, kMin };
static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 985d20f4ff..c074dd88ea 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -115,6 +115,8 @@ namespace {
void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
+ Label post_instantiation_deopt_entry;
+
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- esi: context
@@ -163,6 +165,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ PushRoot(Heap::kTheHoleValueRootIndex);
}
+ // Deoptimizer re-enters stub code here.
+ __ bind(&post_instantiation_deopt_entry);
+
// Set up pointer to last argument.
__ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
@@ -183,7 +188,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
}
// Restore context from the frame.
@@ -240,6 +246,35 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
}
__ ret(0);
+
+ // Store offset of trampoline address for deoptimizer. This is the bailout
+ // point after the receiver instantiation but before the function invocation.
+ // We need to restore some registers in order to continue the above code.
+ if (create_implicit_receiver && !is_api_function) {
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+
+ // ----------- S t a t e -------------
+ // -- eax : newly allocated object
+ // -- esp[0] : constructor function
+ // -----------------------------------
+
+ __ pop(edi);
+ __ push(eax);
+ __ push(eax);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(eax);
+
+ // Retrieve the new target value from the stack. This was placed into the
+ // frame description in place of the receiver by the optimizing compiler.
+ __ mov(edx, Operand(ebp, eax, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+
+ // Continue with constructor function invocation.
+ __ jmp(&post_instantiation_deopt_entry);
+ }
}
} // namespace
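
The construct stub now records two deoptimizer re-entry points instead of one: the existing offset after the constructor invocation, plus a new one after receiver creation that restores registers and jumps back into the main path. A schematic of the control flow (the setter names match the code above; the rest is abbreviated):

// allocate receiver
// post_instantiation_deopt_entry:                 <- "create" resume target
//   push arguments, invoke constructor
//   SetConstructStubInvokeDeoptPCOffset(pc)       <- resume after invocation
//   ... normal return ...
// SetConstructStubCreateDeoptPCOffset(pc)         <- resume after creation:
//   restore argc / new.target, jmp post_instantiation_deopt_entry
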
@@ -534,9 +569,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
Label load_debug_bytecode_array, bytecode_array_loaded;
- __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
- Immediate(DebugInfo::uninitialized()));
- __ j(not_equal, &load_debug_bytecode_array);
+ __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+ &load_debug_bytecode_array);
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
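
The debug-bytecode check gets simpler because the SharedFunctionInfo debug-info slot holds a Smi while uninitialized and a DebugInfo heap object once the debugger attaches, so JumpIfNotSmi alone answers "is debug info present". The C++-level predicate, as a sketch:

// Schematic equivalent of the new assembly test.
bool HasDebugInfoSketch(Object* debug_info_slot) {
  return !debug_info_slot->IsSmi();  // non-Smi => DebugInfo is installed
}
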
@@ -694,7 +728,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
- CallableType function_type) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ebx : the address of the first argument to be pushed. Subsequent
@@ -726,12 +760,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// Call the target.
__ Push(edx); // Re-push return address.
- if (function_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@@ -844,7 +880,7 @@ void Generate_InterpreterPushArgsAndReturnAddress(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
- MacroAssembler* masm, CallableType construct_type) {
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the new target
@@ -870,7 +906,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ Pop(edi);
__ AssertUndefinedOrAllocationSite(ebx);
- if (construct_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ AssertFunction(edi);
@@ -879,9 +915,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
__ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
__ jmp(ecx);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with unmodified eax, edi, edx values.
+ __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(construct_type, CallableType::kAny);
-
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with unmodified eax, edi, edx values.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -1024,6 +1063,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register new_target = edx;
Register argument_count = eax;
+ // Do we have a valid feedback vector?
+ __ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
+ __ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime_no_stack);
+
__ push(argument_count);
__ push(new_target);
__ push(closure);
@@ -1034,9 +1079,8 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
__ cmp(index, Immediate(Smi::FromInt(2)));
- __ j(less, &gotta_call_runtime);
+ __ j(less, &try_shared);
- // Find literals.
// edx : native context
// ebx : length / index
// eax : optimized code map
@@ -1054,19 +1098,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ j(not_equal, &loop_bottom);
- // Literals available?
- __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousLiterals));
- __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
- __ JumpIfSmi(temp, &gotta_call_runtime);
-
- // Save the literals in the closure.
- __ mov(ecx, Operand(esp, 0));
- __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
- __ push(index);
- __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ pop(index);
// Code available?
Register entry = ecx;
@@ -1075,7 +1106,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found literals and code. Get them into the closure and return.
+ // Found code. Get it into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
@@ -1109,9 +1140,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ cmp(index, Immediate(Smi::FromInt(1)));
__ j(greater, &loop_top);
- // We found neither literals nor code.
- __ jmp(&gotta_call_runtime);
-
+ // We found no code.
__ bind(&try_shared);
__ pop(closure);
__ pop(new_target);
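
CompileLazy now gates on the feedback vector instead of per-entry literals: if the closure's feedback-vector cell is still undefined the stub goes straight to the runtime, and the optimized-code-map walk only looks for code, since literals live in the vector. Pseudocode of the revised fast path (schematic):

// vector = closure->feedback_vector_cell()->value();
// if (vector == undefined) goto gotta_call_runtime;   // vector not allocated
// for each context entry in the optimized code map:
//   if (entry.context == native_context && entry.code is alive)
//     install entry.code in closure and tail-call it;
// goto try_shared;                                     // no code found
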
@@ -1621,14 +1650,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&target_not_constructor);
{
__ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
__ mov(Operand(esp, kPointerSize), edx);
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
@@ -2105,7 +2134,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ mov(ebp, esp);
// Store the arguments adaptor context sentinel.
- __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ push(Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Push the function on the stack.
__ push(edi);
@@ -2289,6 +2318,86 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- edi : the target to call (can be any Object)
+ // -- ecx : start index (to support rest parameters)
+ // -- esp[0] : return address.
+ // -- esp[4] : thisArgument
+ // -----------------------------------
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
+ Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &arguments_adaptor, Label::kNear);
+ {
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax,
+ FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(ebx, ebp);
+ }
+ __ jmp(&arguments_done, Label::kNear);
+ __ bind(&arguments_adaptor);
+ {
+ // Just load the length from the ArgumentsAdaptorFrame.
+ __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ }
+ __ bind(&arguments_done);
+
+ Label stack_empty, stack_done;
+ __ SmiUntag(eax);
+ __ sub(eax, ecx);
+ __ j(less_equal, &stack_empty);
+ {
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ Label done;
+ __ LoadRoot(ecx, Heap::kRealStackLimitRootIndex);
+ // Make ecx the space we have left. The stack might already be
+ // overflowed here which will cause ecx to become negative.
+ __ neg(ecx);
+ __ add(ecx, esp);
+ __ sar(ecx, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, eax);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // Forward the arguments from the caller frame.
+ {
+ Label loop;
+ __ mov(ecx, eax);
+ __ pop(edx);
+ __ bind(&loop);
+ {
+ __ Push(Operand(ebx, ecx, times_pointer_size, 1 * kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+ }
+ __ push(edx);
+ }
+ }
+ __ jmp(&stack_done, Label::kNear);
+ __ bind(&stack_empty);
+ {
+ // We just pass the receiver, which is already on the stack.
+ __ Move(eax, Immediate(0));
+ }
+ __ bind(&stack_done);
+
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
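
Generate_CallForwardVarargs re-pushes the caller's actual arguments from `start index` onward, taking the count from the arguments adaptor frame when one exists and from the callee's formal parameter count otherwise, then tail-calls `code`. The copy loop at C level, as a sketch (hypothetical names; `push` stands in for the machine push):

// Forward caller args [start_index, argc); indices run count..1 to match
// the dec/jnz loop above, which walks the caller frame from the top down.
void ForwardVarargsSketch(void** caller_args, int argc, int start_index,
                          void (*push)(void*)) {
  int count = argc - start_index;
  if (count <= 0) return;  // only the receiver, which is already on the stack
  for (int i = count; i >= 1; --i) push(caller_args[i]);
}
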
+
namespace {
// Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2339,7 +2448,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
{
Label no_interpreter_frame;
__ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::STUB)));
+ Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
__ j(not_equal, &no_interpreter_frame, Label::kNear);
__ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
@@ -2350,7 +2459,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &no_arguments_adaptor, Label::kNear);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -2671,6 +2780,178 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+ // Free up some registers.
+ __ movd(xmm0, edx);
+ __ movd(xmm1, edi);
+
+ Register argc = eax;
+
+ Register scratch = ecx;
+ Register scratch2 = edi;
+
+ Register spread = ebx;
+ Register spread_map = edx;
+
+ Register spread_len = edx;
+
+ Label runtime_call, push_args;
+ __ mov(spread, Operand(esp, kPointerSize));
+ __ JumpIfSmi(spread, &runtime_call);
+ __ mov(spread_map, FieldOperand(spread, HeapObject::kMapOffset));
+
+ // Check that the spread is an array.
+ __ CmpInstanceType(spread_map, JS_ARRAY_TYPE);
+ __ j(not_equal, &runtime_call);
+
+ // Check that we have the original ArrayPrototype.
+ __ mov(scratch, FieldOperand(spread_map, Map::kPrototypeOffset));
+ __ mov(scratch2, NativeContextOperand());
+ __ cmp(scratch,
+ ContextOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ j(not_equal, &runtime_call);
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+ __ j(not_equal, &runtime_call);
+
+ // Check that the map of the initial array iterator hasn't changed.
+ __ mov(scratch2, NativeContextOperand());
+ __ mov(scratch,
+ ContextOperand(scratch2,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ cmp(scratch,
+ ContextOperand(scratch2,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, &runtime_call);
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ Label no_protector_check;
+ __ mov(scratch, FieldOperand(spread_map, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(scratch);
+ __ cmp(scratch, Immediate(FAST_HOLEY_ELEMENTS));
+ __ j(above, &runtime_call);
+ // For non-FastHoley kinds, we can skip the protector check.
+ __ cmp(scratch, Immediate(FAST_SMI_ELEMENTS));
+ __ j(equal, &no_protector_check);
+ __ cmp(scratch, Immediate(FAST_ELEMENTS));
+ __ j(equal, &no_protector_check);
+ // Check the ArrayProtector cell.
+ __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+ __ j(not_equal, &runtime_call);
+
+ __ bind(&no_protector_check);
+ // Load the FixedArray backing store, but use the length from the array.
+ __ mov(spread_len, FieldOperand(spread, JSArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+ __ mov(spread, FieldOperand(spread, JSArray::kElementsOffset));
+ __ jmp(&push_args);
+
+ __ bind(&runtime_call);
+ {
+ // Call the builtin for the result of the spread.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Need to save these on the stack.
+ __ movd(edi, xmm1);
+ __ movd(edx, xmm0);
+ __ Push(edi);
+ __ Push(edx);
+ __ SmiTag(argc);
+ __ Push(argc);
+ __ Push(spread);
+ __ CallRuntime(Runtime::kSpreadIterableFixed);
+ __ mov(spread, eax);
+ __ Pop(argc);
+ __ SmiUntag(argc);
+ __ Pop(edx);
+ __ Pop(edi);
+ // Free up some registers.
+ __ movd(xmm0, edx);
+ __ movd(xmm1, edi);
+ }
+
+ {
+ // Calculate the new nargs including the result of the spread.
+ __ mov(spread_len, FieldOperand(spread, FixedArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+
+ __ bind(&push_args);
+ // argc += spread_len - 1. Subtract 1 for the spread itself.
+ __ lea(argc, Operand(argc, spread_len, times_1, -1));
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already be
+ // overflowed here which will cause scratch to become negative.
+ __ neg(scratch);
+ __ add(scratch, esp);
+ __ sar(scratch, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(scratch, spread_len);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // Put the evaluated spread onto the stack as additional arguments.
+ {
+ Register return_address = edi;
+ // Pop the return address and spread argument.
+ __ PopReturnAddressTo(return_address);
+ __ Pop(scratch);
+
+ Register scratch2 = esi;
+ __ movd(xmm2, esi);
+
+ __ mov(scratch, Immediate(0));
+ Label done, push, loop;
+ __ bind(&loop);
+ __ cmp(scratch, spread_len);
+ __ j(equal, &done, Label::kNear);
+ __ mov(scratch2, FieldOperand(spread, scratch, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(scratch2);
+ __ inc(scratch);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ PushReturnAddressFrom(return_address);
+ __ movd(esi, xmm2);
+ __ movd(edi, xmm1);
+ __ movd(edx, xmm0);
+ }
+}
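
CheckSpreadAndPushToStack only reads the spread's backing store directly when iterating it could not be observed: the spread must be a JSArray whose map prototype is the unmodified initial Array.prototype, the array-iterator protector cell must be intact, the elements kind must be fast, and holey kinds additionally require the array protector (holes are then pushed as undefined). Everything else falls back to Runtime::kSpreadIterableFixed. The guard chain as a schematic predicate (not V8 API):

bool CanUseFastSpread(bool is_js_array, bool has_initial_array_prototype,
                      bool iterator_protector_valid, bool is_fast_kind,
                      bool is_holey_kind, bool array_protector_valid) {
  if (!is_js_array || !has_initial_array_prototype) return false;
  if (!iterator_protector_valid) return false;  // iterator may be patched
  if (!is_fast_kind) return false;              // dictionary/slow elements
  if (is_holey_kind && !array_protector_valid) return false;  // holes observable
  return true;
}
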
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the target to call (can be any Object)
+ // -----------------------------------
+
+ // CheckSpreadAndPushToStack will push edx to save it.
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow),
+ RelocInfo::CODE_TARGET);
+}
+
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2794,6 +3075,19 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- edi : the constructor to call (can be any Object)
+ // -----------------------------------
+
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : requested object size (untagged)
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index e9c3719114..fe975e29e9 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -563,6 +563,8 @@ namespace {
void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
+ Label post_instantiation_deopt_entry;
+
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -611,6 +613,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ PushRoot(Heap::kTheHoleValueRootIndex);
}
+ // Deoptimizer re-enters stub code here.
+ __ bind(&post_instantiation_deopt_entry);
+
// Set up pointer to last argument.
__ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -644,7 +649,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
}
// Restore context from the frame.
@@ -705,6 +711,35 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
}
__ Ret();
+
+ // Store offset of trampoline address for deoptimizer. This is the bailout
+ // point after the receiver instantiation but before the function invocation.
+ // We need to restore some registers in order to continue the above code.
+ if (create_implicit_receiver && !is_api_function) {
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+
+ // ----------- S t a t e -------------
+ // -- a0 : newly allocated object
+ // -- sp[0] : constructor function
+ // -----------------------------------
+
+ __ Pop(a1);
+ __ Push(a0, a0);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ lw(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(a0);
+
+ // Retrieve the new target value from the stack. This was placed into the
+ // frame description in place of the receiver by the optimizing compiler.
+ __ Addu(a3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ Lsa(a3, a3, a0, kPointerSizeLog2);
+ __ lw(a3, MemOperand(a3));
+
+ // Continue with constructor function invocation.
+ __ jmp(&post_instantiation_deopt_entry);
+ }
}
} // namespace
@@ -1005,8 +1040,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register debug_info = kInterpreterBytecodeArrayRegister;
DCHECK(!debug_info.is(a0));
__ lw(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
- __ Branch(&load_debug_bytecode_array, ne, debug_info,
- Operand(DebugInfo::uninitialized()));
+ __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
@@ -1018,8 +1052,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
- __ lw(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
- __ lw(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+ __ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+ __ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ lw(t0, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
@@ -1159,7 +1193,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
- CallableType function_type) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1175,12 +1209,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, t0, a2, t4, t1, &stack_overflow);
// Call the target.
- if (function_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@@ -1196,7 +1232,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
- MacroAssembler* masm, CallableType construct_type) {
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- a0 : argument count (not including receiver)
// -- a3 : new target
@@ -1213,7 +1249,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
Generate_InterpreterPushArgs(masm, a0, t4, t1, t0, &stack_overflow);
__ AssertUndefinedOrAllocationSite(a2, t0);
- if (construct_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(a1);
// Tail call to the function-specific construct stub (still in the caller
@@ -1222,8 +1258,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
__ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(construct_type, CallableType::kAny);
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -1348,18 +1388,24 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register argument_count = a0;
Register closure = a1;
Register new_target = a3;
+ Register map = a0;
+ Register index = a2;
+
+ // Do we have a valid feedback vector?
+ __ lw(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ lw(index, FieldMemOperand(index, Cell::kValueOffset));
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime_no_stack);
+
__ push(argument_count);
__ push(new_target);
__ push(closure);
- Register map = a0;
- Register index = a2;
__ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+ __ Branch(&try_shared, lt, index, Operand(Smi::FromInt(2)));
- // Find literals.
// a3 : native context
// a2 : length / index
// a0 : optimized code map
@@ -1379,20 +1425,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
SharedFunctionInfo::kOffsetToPreviousContext));
__ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Branch(&loop_bottom, ne, temp, Operand(native_context));
- // Literals available?
- __ lw(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousLiterals));
- __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ JumpIfSmi(temp, &gotta_call_runtime);
-
- // Save the literals in the closure.
- __ lw(t0, MemOperand(sp, 0));
- __ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
- __ push(index);
- __ RecordWriteField(t0, JSFunction::kLiteralsOffset, temp, index,
- kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(index);
// Code available?
Register entry = t0;
@@ -1402,7 +1434,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found literals and code. Get them into the closure and return.
+ // Found code. Get it into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1437,9 +1469,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
- // We found neither literals nor code.
- __ jmp(&gotta_call_runtime);
-
+ // We found no code.
__ bind(&try_shared);
__ pop(closure);
__ pop(new_target);
@@ -2090,20 +2120,20 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&target_not_constructor);
{
__ sw(a1, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
__ sw(a3, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ sll(a0, a0, kSmiTagSize);
- __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ li(t0, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
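+ // TypeToMarker produces a raw frame-type marker instead of the Smi constant
+ // used previously, presumably so frame markers stay distinguishable from
+ // Smi values found on the stack.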
__ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
__ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize));
@@ -2265,6 +2295,72 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- a1 : the target to call (can be any Object)
+ // -- a2 : start index (to support rest parameters)
+ // -- ra : return address.
+ // -- sp[0] : thisArgument
+ // -----------------------------------
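+ // This builtin forwards the caller's own arguments, starting at the given
+ // index, to the target; it backs calls that spread the caller's (rest)
+ // arguments, e.g. f(...args).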
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Branch(&arguments_adaptor, eq, a0,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ {
+ __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(a0, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a0,
+ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(a3, fp);
+ }
+ __ Branch(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ // Just get the length from the ArgumentsAdaptorFrame.
+ __ lw(a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ }
+ __ bind(&arguments_done);
+
+ Label stack_empty, stack_done, stack_overflow;
+ __ SmiUntag(a0);
+ __ Subu(a0, a0, a2);
+ __ Branch(&stack_empty, le, a0, Operand(zero_reg));
+ {
+ // Check for stack overflow.
+ Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
+
+ // Forward the arguments from the caller frame.
+ {
+ Label loop;
+ __ mov(a2, a0);
+ __ bind(&loop);
+ {
+ __ Lsa(at, a3, a2, kPointerSizeLog2);
+ __ lw(at, MemOperand(at, 1 * kPointerSize));
+ __ push(at);
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&loop, ne, a2, Operand(zero_reg));
+ }
+ }
+ }
+ __ Branch(&stack_done);
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&stack_empty);
+ {
+ // We just pass the receiver, which is already on the stack.
+ __ li(a0, Operand(0));
+ }
+ __ bind(&stack_done);
+
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
namespace {
// Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2315,7 +2411,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ lw(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_interpreter_frame, ne, scratch3,
- Operand(Smi::FromInt(StackFrame::STUB)));
+ Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
}
@@ -2327,7 +2423,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ lw(scratch3,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_arguments_adaptor, ne, scratch3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
@@ -2623,6 +2719,151 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+ Register argc = a0;
+ Register constructor = a1;
+ Register new_target = a3;
+
+ Register scratch = t0;
+ Register scratch2 = t1;
+
+ Register spread = a2;
+ Register spread_map = t3;
+
+ Register spread_len = t3;
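+ // Note that spread_map and spread_len deliberately share t3: the map is
+ // dead once the backing-store length has been loaded.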
+
+ Register native_context = t4;
+
+ Label runtime_call, push_args;
+ __ lw(spread, MemOperand(sp, 0));
+ __ JumpIfSmi(spread, &runtime_call);
+ __ lw(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+ __ lw(native_context, NativeContextMemOperand());
+
+ // Check that the spread is an array.
+ __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
+ __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Check that we have the original ArrayPrototype.
+ __ lw(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+ __ lw(scratch2, ContextMemOperand(native_context,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+ __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ Branch(&runtime_call, ne, scratch,
+ Operand(Smi::FromInt(Isolate::kProtectorValid)));
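+ // The protector cell is invalidated by the runtime once anyone patches
+ // Array.prototype[Symbol.iterator] or the array iterator's next method,
+ // so a still-valid cell lets us bypass the generic iteration protocol.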
+
+ // Check that the map of the initial array iterator hasn't changed.
+ __ lw(scratch,
+ ContextMemOperand(native_context,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ lw(scratch2,
+ ContextMemOperand(native_context,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ Label no_protector_check;
+ __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(scratch);
+ __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
+ // For non-FastHoley kinds, we can skip the protector check.
+ __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
+ __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
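+ // Holey arrays may contain holes, and iterating those would consult the
+ // prototype chain; the ArrayProtector cell vouches that the relevant
+ // prototypes carry no elements, so holes can simply read as undefined.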
+ // Check the ArrayProtector cell.
+ __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+ __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ Branch(&runtime_call, ne, scratch,
+ Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+ __ bind(&no_protector_check);
+ // Load the FixedArray backing store, but use the length from the array.
+ __ lw(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+ __ lw(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+ __ Branch(&push_args);
+
+ __ bind(&runtime_call);
+ {
+ // Call the builtin for the result of the spread.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(argc);
+ __ Push(constructor, new_target, argc, spread);
+ __ CallRuntime(Runtime::kSpreadIterableFixed);
+ __ mov(spread, v0);
+ __ Pop(constructor, new_target, argc);
+ __ SmiUntag(argc);
+ }
+
+ {
+ // Calculate the new nargs including the result of the spread.
+ __ lw(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+
+ __ bind(&push_args);
+ // argc += spread_len - 1. Subtract 1 for the spread itself.
+ __ Addu(argc, argc, spread_len);
+ __ Subu(argc, argc, Operand(1));
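+ // For example, f(a, b, ...xs) with xs.length == 3 enters with argc == 3
+ // (a, b and the spread value) and leaves with argc == 3 + 3 - 1 == 5.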
+
+ // Pop the spread argument off the stack.
+ __ Pop(scratch);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already be
+ // overflowed here, which will cause scratch to become negative.
+ __ Subu(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ sll(at, spread_len, kPointerSizeLog2);
+ __ Branch(&done, gt, scratch, Operand(at)); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // Put the evaluated spread onto the stack as additional arguments.
+ {
+ __ mov(scratch, zero_reg);
+ Label done, push, loop;
+ __ bind(&loop);
+ __ Branch(&done, eq, scratch, Operand(spread_len));
+ __ Lsa(scratch2, spread, scratch, kPointerSizeLog2);
+ __ lw(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+ __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
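+ // The element is the hole; substitute undefined, which is what iterating
+ // the array would have produced.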
+ __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(scratch2);
+ __ Addu(scratch, scratch, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done);
+ }
+}
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // CheckSpreadAndPushToStack will push a3 to save it.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow),
+ RelocInfo::CODE_TARGET);
+}
+
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2805,6 +3046,19 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (can be any Object)
+ // -- a3 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 36c72cb5e8..8fcce9fa26 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -561,6 +561,8 @@ namespace {
void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
+ Label post_instantiation_deopt_entry;
+
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -605,6 +607,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ PushRoot(Heap::kTheHoleValueRootIndex);
}
+ // Deoptimizer re-enters stub code here.
+ __ bind(&post_instantiation_deopt_entry);
+
// Set up pointer to last argument.
__ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -638,7 +643,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
}
// Restore context from the frame.
@@ -700,6 +706,35 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
}
__ Ret();
+
+ // Store offset of trampoline address for deoptimizer. This is the bailout
+ // point after the receiver instantiation but before the function invocation.
+ // We need to restore some registers in order to continue the above code.
+ if (create_implicit_receiver && !is_api_function) {
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+
+ // ----------- S t a t e -------------
+ // -- a0 : newly allocated object
+ // -- sp[0] : constructor function
+ // -----------------------------------
+
+ __ Pop(a1);
+ __ Push(a0, a0);
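+ // Two copies of the new object are needed: the invoked function pops one
+ // as its receiver, and the other survives the call so it can be returned
+ // if the constructor does not return an object itself.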
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(a0);
+
+ // Retrieve the new target value from the stack. This was placed into the
+ // frame description in place of the receiver by the optimizing compiler.
+ __ Daddu(a3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ Dlsa(a3, a3, a0, kPointerSizeLog2);
+ __ ld(a3, MemOperand(a3));
+
+ // Continue with constructor function invocation.
+ __ jmp(&post_instantiation_deopt_entry);
+ }
}
} // namespace
@@ -996,8 +1031,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register debug_info = kInterpreterBytecodeArrayRegister;
DCHECK(!debug_info.is(a0));
__ ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
- __ Branch(&load_debug_bytecode_array, ne, debug_info,
- Operand(DebugInfo::uninitialized()));
+ __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
__ ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
@@ -1009,8 +1043,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
- __ ld(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
- __ ld(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+ __ ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+ __ ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
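+ // The feedback vector is now reached through a Cell hanging off the
+ // function, so it has to be unwrapped before indexing into it.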
__ ld(a4, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
@@ -1150,7 +1184,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
- CallableType function_type) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1166,12 +1200,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, a3, a2, a4, t0, &stack_overflow);
// Call the target.
- if (function_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@@ -1187,7 +1223,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
- MacroAssembler* masm, CallableType construct_type) {
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- a0 : argument count (not including receiver)
// -- a3 : new target
@@ -1204,7 +1240,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
Generate_InterpreterPushArgs(masm, a0, a4, a5, t0, &stack_overflow);
__ AssertUndefinedOrAllocationSite(a2, t0);
- if (construct_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(a1);
// Tail call to the function-specific construct stub (still in the caller
@@ -1213,8 +1249,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
__ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(construct_type, CallableType::kAny);
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -1339,18 +1379,24 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register argument_count = a0;
Register closure = a1;
Register new_target = a3;
+ Register map = a0;
+ Register index = a2;
+
+ // Do we have a valid feedback vector?
+ __ ld(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ ld(index, FieldMemOperand(index, Cell::kValueOffset));
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime_no_stack);
+
__ push(argument_count);
__ push(new_target);
__ push(closure);
- Register map = a0;
- Register index = a2;
__ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+ __ Branch(&try_shared, lt, index, Operand(Smi::FromInt(2)));
- // Find literals.
// a3 : native context
// a2 : length / index
// a0 : optimized code map
@@ -1370,20 +1416,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
SharedFunctionInfo::kOffsetToPreviousContext));
__ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Branch(&loop_bottom, ne, temp, Operand(native_context));
- // Literals available?
- __ ld(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousLiterals));
- __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ JumpIfSmi(temp, &gotta_call_runtime);
-
- // Save the literals in the closure.
- __ ld(a4, MemOperand(sp, 0));
- __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset));
- __ push(index);
- __ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index,
- kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(index);
// Code available?
Register entry = a4;
@@ -1393,7 +1425,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found literals and code. Get them into the closure and return.
+ // Found code. Get it into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1428,9 +1460,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
- // We found neither literals nor code.
- __ jmp(&gotta_call_runtime);
-
+ // We found no code.
__ bind(&try_shared);
__ pop(closure);
__ pop(new_target);
@@ -2094,21 +2124,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&target_not_constructor);
{
__ sd(target, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
__ sd(new_target, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// __ sll(a0, a0, kSmiTagSize);
__ dsll32(a0, a0, 0);
- __ li(a4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
__ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize));
@@ -2204,7 +2234,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ Branch(&create_runtime, ne, a2, Operand(at));
__ LoadRoot(at, Heap::kArrayProtectorRootIndex);
- __ lw(a2, UntagSmiFieldMemOperand(at, PropertyCell::kValueOffset));
+ __ lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
__ Branch(&create_runtime, ne, a2,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
__ lw(a2, UntagSmiFieldMemOperand(a0, JSArray::kLengthOffset));
@@ -2296,6 +2326,72 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- a1 : the target to call (can be any Object)
+ // -- a2 : start index (to support rest parameters)
+ // -- ra : return address.
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Branch(&arguments_adaptor, eq, a0,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ {
+ __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ld(a0, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a0,
+ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(a3, fp);
+ }
+ __ Branch(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ // Just get the length from the ArgumentsAdaptorFrame.
+ __ lw(a0, UntagSmiMemOperand(
+ a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
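+ // The load untags the Smi length in one go, so no separate SmiUntag is
+ // needed on this path.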
+ }
+ __ bind(&arguments_done);
+
+ Label stack_empty, stack_done, stack_overflow;
+ __ Subu(a0, a0, a2);
+ __ Branch(&stack_empty, le, a0, Operand(zero_reg));
+ {
+ // Check for stack overflow.
+ Generate_StackOverflowCheck(masm, a0, a4, a5, &stack_overflow);
+
+ // Forward the arguments from the caller frame.
+ {
+ Label loop;
+ __ mov(a2, a0);
+ __ bind(&loop);
+ {
+ __ Dlsa(at, a3, a2, kPointerSizeLog2);
+ __ ld(at, MemOperand(at, 1 * kPointerSize));
+ __ push(at);
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&loop, ne, a2, Operand(zero_reg));
+ }
+ }
+ }
+ __ Branch(&stack_done);
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&stack_empty);
+ {
+ // We just pass the receiver, which is already on the stack.
+ __ mov(a0, zero_reg);
+ }
+ __ bind(&stack_done);
+
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
namespace {
// Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2346,7 +2442,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ ld(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_interpreter_frame, ne, scratch3,
- Operand(Smi::FromInt(StackFrame::STUB)));
+ Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
}
@@ -2358,7 +2454,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ ld(scratch3,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_arguments_adaptor, ne, scratch3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
@@ -2649,6 +2745,150 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+ Register argc = a0;
+ Register constructor = a1;
+ Register new_target = a3;
+
+ Register scratch = t0;
+ Register scratch2 = t1;
+
+ Register spread = a2;
+ Register spread_map = a4;
+
+ Register spread_len = a4;
+
+ Register native_context = a5;
+
+ Label runtime_call, push_args;
+ __ ld(spread, MemOperand(sp, 0));
+ __ JumpIfSmi(spread, &runtime_call);
+ __ ld(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+ __ ld(native_context, NativeContextMemOperand());
+
+ // Check that the spread is an array.
+ __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
+ __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Check that we have the original ArrayPrototype.
+ __ ld(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+ __ ld(scratch2, ContextMemOperand(native_context,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+ __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ Branch(&runtime_call, ne, scratch,
+ Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+ // Check that the map of the initial array iterator hasn't changed.
+ __ ld(scratch,
+ ContextMemOperand(native_context,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ ld(scratch2,
+ ContextMemOperand(native_context,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ Label no_protector_check;
+ __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(scratch);
+ __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
+ // For non-FastHoley kinds, we can skip the protector check.
+ __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
+ __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
+ // Check the ArrayProtector cell.
+ __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+ __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ Branch(&runtime_call, ne, scratch,
+ Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+ __ bind(&no_protector_check);
+ // Load the FixedArray backing store, but use the length from the array.
+ __ lw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
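+ // On 64-bit targets the Smi length can be loaded and untagged in a single
+ // instruction, avoiding the separate SmiUntag used by the 32-bit variant.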
+ __ ld(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+ __ Branch(&push_args);
+
+ __ bind(&runtime_call);
+ {
+ // Call the builtin for the result of the spread.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(argc);
+ __ Push(constructor, new_target, argc, spread);
+ __ CallRuntime(Runtime::kSpreadIterableFixed);
+ __ mov(spread, v0);
+ __ Pop(constructor, new_target, argc);
+ __ SmiUntag(argc);
+ }
+
+ {
+ // Calculate the new nargs including the result of the spread.
+ __ lw(spread_len,
+ UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
+
+ __ bind(&push_args);
+ // argc += spread_len - 1. Subtract 1 for the spread itself.
+ __ Daddu(argc, argc, spread_len);
+ __ Dsubu(argc, argc, Operand(1));
+
+ // Pop the spread argument off the stack.
+ __ Pop(scratch);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already be
+ // overflowed here, which will cause scratch to become negative.
+ __ Dsubu(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ dsll(at, spread_len, kPointerSizeLog2);
+ __ Branch(&done, gt, scratch, Operand(at)); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // Put the evaluated spread onto the stack as additional arguments.
+ {
+ __ mov(scratch, zero_reg);
+ Label done, push, loop;
+ __ bind(&loop);
+ __ Branch(&done, eq, scratch, Operand(spread_len));
+ __ Dlsa(scratch2, spread, scratch, kPointerSizeLog2);
+ __ ld(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+ __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+ __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(scratch2);
+ __ Daddu(scratch, scratch, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done);
+ }
+}
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // CheckSpreadAndPushToStack will push a3 to save it.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow),
+ RelocInfo::CODE_TARGET);
+}
+
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2828,6 +3068,19 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (can be any Object)
+ // -- a3 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 655d466030..be07f748c1 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -560,6 +560,7 @@ namespace {
void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
+ Label post_instantiation_deopt_entry;
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
@@ -608,6 +609,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ Push(r7, r7);
}
+ // Deoptimizer re-enters stub code here.
+ __ bind(&post_instantiation_deopt_entry);
+
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -636,14 +640,17 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// r3: number of arguments
// r4: constructor function
// r6: new target
-
- ParameterCount actual(r3);
- __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ ParameterCount actual(r3);
+ __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+ }
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
}
// Restore context from the frame.
@@ -708,6 +715,34 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
}
__ blr();
+ // Store offset of trampoline address for deoptimizer. This is the bailout
+ // point after the receiver instantiation but before the function invocation.
+ // We need to restore some registers in order to continue the above code.
+ if (create_implicit_receiver && !is_api_function) {
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+
+ // ----------- S t a t e -------------
+ // -- r3 : newly allocated object
+ // -- sp[0] : constructor function
+ // -----------------------------------
+
+ __ pop(r4);
+ __ Push(r3, r3);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(r3);
+
+ // Retrieve the new target value from the stack. This was placed into the
+ // frame description in place of the receiver by the optimizing compiler.
+ __ addi(r6, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+ __ LoadPX(r6, MemOperand(r6, ip));
+
+ // Continue with constructor function invocation.
+ __ b(&post_instantiation_deopt_entry);
+ }
}
} // namespace
@@ -1019,8 +1054,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
- __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
- __ beq(&array_done);
+ __ TestIfSmi(debug_info, r0);
+ __ beq(&array_done, cr0);
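+ // The debug-info slot now holds a Smi when no DebugInfo object has been
+ // allocated, so a plain Smi test replaces the comparison against the old
+ // uninitialized sentinel.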
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
__ bind(&array_done);
@@ -1033,11 +1068,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&switch_to_different_code_kind);
// Increment invocation count for the function.
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
- __ LoadP(r7, FieldMemOperand(r7, LiteralsArray::kFeedbackVectorOffset));
- __ LoadP(r8, FieldMemOperand(r7, FeedbackVector::kInvocationCountIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(r7, FieldMemOperand(r7, Cell::kValueOffset));
+ __ LoadP(r8, FieldMemOperand(
+ r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
__ StoreP(r8, FieldMemOperand(
r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
@@ -1163,7 +1198,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
- CallableType function_type) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r5 : the address of the first argument to be pushed. Subsequent
@@ -1180,12 +1215,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, r6, r5, r6, r7, &stack_overflow);
// Call the target.
- if (function_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@@ -1201,7 +1238,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
- MacroAssembler* masm, CallableType construct_type) {
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r3 : argument count (not including receiver)
// -- r6 : new target
@@ -1224,7 +1261,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ bind(&skip);
__ AssertUndefinedOrAllocationSite(r5, r8);
- if (construct_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r4);
// Tail call to the function-specific construct stub (still in the caller
@@ -1234,9 +1271,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
// Jump to the construct function.
__ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
-
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with r3, r4, and r6 unmodified.
+ __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(construct_type, CallableType::kAny);
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with r3, r4, and r6 unmodified.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -1360,15 +1400,20 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register closure = r4;
Register map = r9;
Register index = r5;
+
+ // Do we have a valid feedback vector?
+ __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+
__ LoadP(map,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(map,
FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
__ CmpSmiLiteral(index, Smi::FromInt(2), r0);
- __ blt(&gotta_call_runtime);
+ __ blt(&try_shared);
- // Find literals.
// r10 : native context
// r5 : length / index
// r9 : optimized code map
@@ -1389,18 +1434,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ bne(&loop_bottom);
- // Literals available?
- __ LoadP(temp,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousLiterals));
- __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ JumpIfSmi(temp, &gotta_call_runtime);
-
- // Save the literals in the closure.
- __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
- __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r7,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
// Code available?
Register entry = r7;
@@ -1410,7 +1443,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found literals and code. Get them into the closure and return.
+ // Found code. Get it into the closure and return.
// Store code entry in the closure.
__ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
@@ -1444,7 +1477,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ CmpSmiLiteral(index, Smi::FromInt(1), r0);
__ bgt(&loop_top);
- // We found neither literals nor code.
+ // We found no code.
__ b(&gotta_call_runtime);
__ bind(&try_shared);
@@ -2113,20 +2146,20 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&target_not_constructor);
{
__ StoreP(r4, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
__ StoreP(r6, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r3);
- __ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ mov(r7, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ mflr(r0);
__ push(r0);
if (FLAG_enable_embedded_constant_pool) {
@@ -2300,6 +2333,76 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- r4 : the target to call (can be any Object)
+ // -- r5 : start index (to support rest parameters)
+ // -- lr : return address.
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ cmpi(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ beq(&arguments_adaptor);
+ {
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mr(r6, fp);
+ }
+ __ b(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ // Load the length from the ArgumentsAdaptorFrame.
+ __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ }
+ __ bind(&arguments_done);
+
+ Label stack_empty, stack_done, stack_overflow;
+ __ SmiUntag(r3);
+ __ sub(r3, r3, r5);
+ __ cmpi(r3, Operand::Zero());
+ __ ble(&stack_empty);
+ {
+ // Check for stack overflow.
+ Generate_StackOverflowCheck(masm, r3, r5, &stack_overflow);
+
+ // Forward the arguments from the caller frame.
+ {
+ Label loop;
+ __ addi(r6, r6, Operand(kPointerSize));
+ __ mr(r5, r3);
+ __ bind(&loop);
+ {
+ __ ShiftLeftImm(ip, r5, Operand(kPointerSizeLog2));
+ __ LoadPX(ip, MemOperand(r6, ip));
+ __ push(ip);
+ __ subi(r5, r5, Operand(1));
+ __ cmpi(r5, Operand::Zero());
+ __ bne(&loop);
+ }
+ }
+ }
+ __ b(&stack_done);
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&stack_empty);
+ {
+ // We just pass the receiver, which is already on the stack.
+ __ mov(r3, Operand::Zero());
+ }
+ __ bind(&stack_done);
+
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
namespace {
// Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2350,7 +2453,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
Label no_interpreter_frame;
__ LoadP(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+ __ cmpi(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ bne(&no_interpreter_frame);
__ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
@@ -2363,7 +2466,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ LoadP(
scratch3,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ cmpi(scratch3,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ bne(&no_arguments_adaptor);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -2685,6 +2789,156 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+ Register argc = r3;
+ Register constructor = r4;
+ Register new_target = r6;
+
+ Register scratch = r5;
+ Register scratch2 = r9;
+
+ Register spread = r7;
+ Register spread_map = r8;
+ Register spread_len = r8;
+ Label runtime_call, push_args;
+ __ LoadP(spread, MemOperand(sp, 0));
+ __ JumpIfSmi(spread, &runtime_call);
+ __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+
+ // Check that the spread is an array.
+ __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
+ __ bne(&runtime_call);
+
+ // Check that we have the original ArrayPrototype.
+ __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+ __ LoadP(scratch2, NativeContextMemOperand());
+ __ LoadP(scratch2,
+ ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ cmp(scratch, scratch2);
+ __ bne(&runtime_call);
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+ __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
+ __ bne(&runtime_call);
+
+ // Check that the map of the initial array iterator hasn't changed.
+ __ LoadP(scratch2, NativeContextMemOperand());
+ __ LoadP(scratch,
+ ContextMemOperand(scratch2,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ LoadP(scratch2,
+ ContextMemOperand(
+ scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ __ cmp(scratch, scratch2);
+ __ bne(&runtime_call);
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ Label no_protector_check;
+ __ lbz(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(scratch);
+ __ cmpi(scratch, Operand(FAST_HOLEY_ELEMENTS));
+ __ bgt(&runtime_call);
+ // For non-FastHoley kinds, we can skip the protector check.
+ __ cmpi(scratch, Operand(FAST_SMI_ELEMENTS));
+ __ beq(&no_protector_check);
+ __ cmpi(scratch, Operand(FAST_ELEMENTS));
+ __ beq(&no_protector_check);
+ // Check the ArrayProtector cell.
+ __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+ __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
+ __ bne(&runtime_call);
+
+ __ bind(&no_protector_check);
+ // Load the FixedArray backing store, but use the length from the array.
+ __ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+ __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+ __ b(&push_args);
+
+ __ bind(&runtime_call);
+ {
+ // Call the builtin for the result of the spread.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(argc);
+ __ Push(constructor, new_target, argc, spread);
+ __ CallRuntime(Runtime::kSpreadIterableFixed);
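+ // The runtime returns the spread's elements as a FixedArray in r3.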
+ __ mr(spread, r3);
+ __ Pop(constructor, new_target, argc);
+ __ SmiUntag(argc);
+ }
+
+ {
+ // Calculate the new nargs including the result of the spread.
+ __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+
+ __ bind(&push_args);
+ // argc += spread_len - 1. Subtract 1 for the spread itself.
+ __ add(argc, argc, spread_len);
+ __ subi(argc, argc, Operand(1));
+
+ // Pop the spread argument off the stack.
+ __ Pop(scratch);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already be
+ // overflowed here, which will cause scratch to become negative.
+ __ sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftImm(r0, spread_len, Operand(kPointerSizeLog2));
+ __ cmp(scratch, r0);
+ __ bgt(&done); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // Put the evaluated spread onto the stack as additional arguments.
+ {
+ __ li(scratch, Operand::Zero());
+ Label done, push, loop;
+ __ bind(&loop);
+ __ cmp(scratch, spread_len);
+ __ beq(&done);
+ __ ShiftLeftImm(r0, scratch, Operand(kPointerSizeLog2));
+ __ add(scratch2, spread, r0);
+ __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+ __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+ __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(scratch2);
+ __ addi(scratch, scratch, Operand(1));
+ __ b(&loop);
+ __ bind(&done);
+ }
+}
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the target to call (can be any Object)
+ // -----------------------------------
+
+ // CheckSpreadAndPushToStack will push r6 to save it.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow),
+ RelocInfo::CODE_TARGET);
+}
+
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2805,6 +3059,18 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the constructor to call (can be any Object)
+ // -- r6 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 9fc184af30..429282d69e 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -556,6 +556,7 @@ namespace {
void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
+ Label post_instantiation_deopt_entry;
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- r3 : constructor function
@@ -606,6 +607,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ Push(r6, r6);
}
+ // Deoptimizer re-enters stub code here.
+ __ bind(&post_instantiation_deopt_entry);
+
// Set up pointer to last argument.
__ la(r4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
@@ -641,7 +645,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
}
// Restore context from the frame.
@@ -707,6 +712,35 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r3, r4);
}
__ Ret();
+
+ // Store offset of trampoline address for deoptimizer. This is the bailout
+ // point after the receiver instantiation but before the function invocation.
+ // We need to restore some registers in order to continue the above code.
+ if (create_implicit_receiver && !is_api_function) {
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+
+ // ----------- S t a t e -------------
+ // -- r2 : newly allocated object
+ // -- sp[0] : constructor function
+ // -----------------------------------
+
+ __ pop(r3);
+ __ Push(r2, r2);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ LoadP(r2, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(r2);
+
+ // Retrieve the new target value from the stack. This was placed into the
+ // frame description in place of the receiver by the optimizing compiler.
+ __ la(r5, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
+ __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
+ __ LoadP(r5, MemOperand(r5, ip));
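+ // This computes the receiver slot, at caller SP plus argc * kPointerSize.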
+
+ // Continue with constructor function invocation.
+ __ b(&post_instantiation_deopt_entry);
+ }
}
} // namespace
@@ -1025,7 +1059,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
- __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
+ __ TestIfSmi(debug_info);
__ beq(&array_done);
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
@@ -1038,11 +1072,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&switch_to_different_code_kind);
// Increment invocation count for the function.
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ LoadP(r6, FieldMemOperand(r6, LiteralsArray::kFeedbackVectorOffset));
- __ LoadP(r1, FieldMemOperand(r6, FeedbackVector::kInvocationCountIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
+ __ LoadP(r1, FieldMemOperand(
+ r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
__ StoreP(r1, FieldMemOperand(
r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
@@ -1170,7 +1204,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
- CallableType function_type) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r4 : the address of the first argument to be pushed. Subsequent
@@ -1187,12 +1221,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, r5, r4, r5, r6, &stack_overflow);
// Call the target.
- if (function_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@@ -1208,7 +1244,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
- MacroAssembler* masm, CallableType construct_type) {
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r2 : argument count (not including receiver)
// -- r5 : new target
@@ -1230,7 +1266,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ bind(&skip);
__ AssertUndefinedOrAllocationSite(r4, r7);
- if (construct_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r3);
// Tail call to the function-specific construct stub (still in the caller
@@ -1240,9 +1276,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
// Jump to the construct function.
__ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
-
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with r2, r3, and r5 unmodified.
+ __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(construct_type, CallableType::kAny);
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with r2, r3, and r5 unmodified.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
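
This three-way dispatch is the heart of the CallableType to InterpreterPushArgsMode change: a statically known JSFunction skips the generic type checks, a trailing spread detours through the new CallWithSpread/ConstructWithSpread builtins, and everything else takes the generic path. A standalone C++ sketch of the same shape, with plain functions standing in for the real builtins:

  #include <cstdio>

  enum class InterpreterPushArgsMode { kJSFunction, kWithFinalSpread, kOther };

  // Stand-ins for the builtin entry points the real stubs tail-call.
  void CallKnownJSFunction() { std::puts("CallFunction builtin"); }
  void CallWithSpread()      { std::puts("CallWithSpread builtin"); }
  void CallAnyCallable()     { std::puts("generic Call builtin"); }

  void DispatchCall(InterpreterPushArgsMode mode) {
    if (mode == InterpreterPushArgsMode::kJSFunction) {
      CallKnownJSFunction();  // statically known JSFunction: skip type checks
    } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
      CallWithSpread();       // last argument is a spread: expand it first
    } else {                  // kOther: target may be any callable object
      CallAnyCallable();
    }
  }

  int main() { DispatchCall(InterpreterPushArgsMode::kWithFinalSpread); }
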
@@ -1365,13 +1404,19 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register closure = r3;
Register map = r8;
Register index = r4;
+
+ // Do we have a valid feedback vector?
+ __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+
__ LoadP(map,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(map,
FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
__ CmpSmiLiteral(index, Smi::FromInt(2), r0);
- __ blt(&gotta_call_runtime);
+ __ blt(&try_shared);
// Find literals.
// r9 : native context
@@ -1394,18 +1439,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ CmpP(temp, native_context);
__ bne(&loop_bottom, Label::kNear);
- // Literals available?
- __ LoadP(temp,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousLiterals));
- __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ JumpIfSmi(temp, &gotta_call_runtime);
-
- // Save the literals in the closure.
- __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
- __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r6,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
// Code available?
Register entry = r6;
@@ -1415,7 +1448,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found literals and code. Get them into the closure and return.
+ // Found code. Get it into the closure and return.
// Store code entry in the closure.
__ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
@@ -1449,7 +1482,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ CmpSmiLiteral(index, Smi::FromInt(1), r0);
__ bgt(&loop_top);
- // We found neither literals nor code.
+ // We found no code.
__ b(&gotta_call_runtime);
__ bind(&try_shared);
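
The reordered CompileLazy fast path can be read as three early-outs: a missing feedback vector means the runtime must set one up, a too-short optimized code map now falls through to try_shared instead of straight to the runtime, and only a matching native context with live code installs it directly. A hedged C++ model of that control flow, with plain pointers standing in for tagged values:

  #include <cstdio>

  struct Cell { const void* value; };
  struct JSFunction { Cell* feedback_vector_cell; };

  const void* const kUndefined = nullptr;  // stand-in for the undefined root

  // Mirrors the new ordering: missing feedback vector -> runtime;
  // cached optimized code -> install it; otherwise -> try_shared.
  const char* CompileLazyPath(const JSFunction& fn, bool has_cached_code) {
    if (fn.feedback_vector_cell->value == kUndefined)
      return "gotta_call_runtime";
    if (has_cached_code) return "install optimized code";
    return "try_shared";  // fall back to the SharedFunctionInfo's code
  }

  int main() {
    Cell empty{nullptr};
    JSFunction fn{&empty};
    std::puts(CompileLazyPath(fn, true));  // prints gotta_call_runtime
  }
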
@@ -2115,20 +2148,20 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&target_not_constructor);
{
__ StoreP(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
__ StoreP(r5, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r2);
- __ LoadSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Load(r6, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Stack updated as such:
// old SP --->
// R14 Return Addr
@@ -2312,6 +2345,75 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- r3 : the target to call (can be any Object)
+ // -- r4 : start index (to support rest parameters)
+ // -- lr : return address.
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ CmpP(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ beq(&arguments_adaptor);
+ {
+ __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadW(r2, FieldMemOperand(
+ r2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRR(r5, fp);
+ }
+ __ b(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ // Load the length from the ArgumentsAdaptorFrame.
+ __ LoadP(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ }
+ __ bind(&arguments_done);
+
+ Label stack_empty, stack_done, stack_overflow;
+ __ SmiUntag(r2);
+ __ SubP(r2, r2, r4);
+ __ CmpP(r2, Operand::Zero());
+ __ ble(&stack_empty);
+ {
+ // Check for stack overflow.
+ Generate_StackOverflowCheck(masm, r2, r4, &stack_overflow);
+
+ // Forward the arguments from the caller frame.
+ {
+ Label loop;
+ __ AddP(r5, r5, Operand(kPointerSize));
+ __ LoadRR(r4, r2);
+ __ bind(&loop);
+ {
+ __ ShiftLeftP(ip, r4, Operand(kPointerSizeLog2));
+ __ LoadP(ip, MemOperand(r5, ip));
+ __ push(ip);
+ __ SubP(r4, r4, Operand(1));
+ __ CmpP(r4, Operand::Zero());
+ __ bne(&loop);
+ }
+ }
+ }
+ __ b(&stack_done);
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&stack_empty);
+ {
+ // We just pass the receiver, which is already on the stack.
+ __ mov(r2, Operand::Zero());
+ }
+ __ bind(&stack_done);
+
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
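
Generate_CallForwardVarargs forwards the caller's actual arguments starting at a given index, taking the length either from the function's formal parameter count or, when an arguments adaptor frame is present, from that frame's length slot. A small C++ model of the count-and-copy logic, assuming the caller's arguments sit in a plain array:

  #include <cstdio>
  #include <vector>

  // Forward the caller's arguments [start_index, len) onto a new "stack",
  // mirroring the stub's count = len - start_index (clamped at zero) and
  // its top-down push loop.
  std::vector<int> ForwardVarargs(const std::vector<int>& caller_args,
                                  int start_index) {
    std::vector<int> pushed;
    int count = static_cast<int>(caller_args.size()) - start_index;
    if (count <= 0) return pushed;  // stack_empty: only the receiver remains
    for (int i = count; i > 0; --i)
      pushed.push_back(caller_args[start_index + i - 1]);
    return pushed;
  }

  int main() {
    for (int v : ForwardVarargs({10, 20, 30, 40}, 1)) std::printf("%d ", v);
  }
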
+
namespace {
// Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2362,7 +2464,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
Label no_interpreter_frame;
__ LoadP(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+ __ CmpP(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ bne(&no_interpreter_frame);
__ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
@@ -2375,7 +2477,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ LoadP(
scratch3,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ CmpP(scratch3,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ bne(&no_arguments_adaptor);
// Drop current frame and load arguments count from arguments adaptor frame.
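
The recurring CmpSmiLiteral-to-CmpP/Immediate rewrites in these hunks reflect the new frame-marker encoding: frame types are no longer stored as heap Smis but as small integer markers that can be compared as plain immediates. A sketch of the presumed encoding (the exact shift and values are assumptions; the point is that a marker stays distinguishable from an aligned pointer):

  #include <cstdint>
  #include <cstdio>

  enum FrameType { STUB = 3, ARGUMENTS_ADAPTOR = 8 };  // values illustrative

  // Presumed TypeToMarker: tag the type so it cannot be confused with a
  // properly aligned pointer (low bit set), Smi-compatible on 32-bit.
  constexpr int32_t TypeToMarker(FrameType type) {
    return (static_cast<int32_t>(type) << 1) | 1;
  }

  int main() {
    std::printf("ARGUMENTS_ADAPTOR marker = %d\n",
                TypeToMarker(ARGUMENTS_ADAPTOR));
  }
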
@@ -2698,6 +2801,156 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+ Register argc = r2;
+ Register constructor = r3;
+ Register new_target = r5;
+
+ Register scratch = r4;
+ Register scratch2 = r8;
+
+ Register spread = r6;
+ Register spread_map = r7;
+ Register spread_len = r7;
+ Label runtime_call, push_args;
+ __ LoadP(spread, MemOperand(sp, 0));
+ __ JumpIfSmi(spread, &runtime_call);
+ __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+
+ // Check that the spread is an array.
+ __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
+ __ bne(&runtime_call);
+
+ // Check that we have the original ArrayPrototype.
+ __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+ __ LoadP(scratch2, NativeContextMemOperand());
+ __ LoadP(scratch2,
+ ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ CmpP(scratch, scratch2);
+ __ bne(&runtime_call);
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+ __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
+ __ bne(&runtime_call);
+
+ // Check that the map of the initial array iterator hasn't changed.
+ __ LoadP(scratch2, NativeContextMemOperand());
+ __ LoadP(scratch,
+ ContextMemOperand(scratch2,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ LoadP(scratch2,
+ ContextMemOperand(
+ scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ __ CmpP(scratch, scratch2);
+ __ bne(&runtime_call);
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ Label no_protector_check;
+ __ LoadlB(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(scratch);
+ __ CmpP(scratch, Operand(FAST_HOLEY_ELEMENTS));
+ __ bgt(&runtime_call);
+ // For non-FastHoley kinds, we can skip the protector check.
+ __ CmpP(scratch, Operand(FAST_SMI_ELEMENTS));
+ __ beq(&no_protector_check);
+ __ CmpP(scratch, Operand(FAST_ELEMENTS));
+ __ beq(&no_protector_check);
+ // Check the ArrayProtector cell.
+ __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+ __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
+ __ bne(&runtime_call);
+
+ __ bind(&no_protector_check);
+ // Load the FixedArray backing store, but use the length from the array.
+ __ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+ __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+ __ b(&push_args);
+
+ __ bind(&runtime_call);
+ {
+ // Call the builtin for the result of the spread.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(argc);
+ __ Push(constructor, new_target, argc, spread);
+ __ CallRuntime(Runtime::kSpreadIterableFixed);
+ __ LoadRR(spread, r2);
+ __ Pop(constructor, new_target, argc);
+ __ SmiUntag(argc);
+ }
+
+ {
+ // Calculate the new nargs including the result of the spread.
+ __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+
+ __ bind(&push_args);
+ // argc += spread_len - 1. Subtract 1 for the spread itself.
+ __ AddP(argc, argc, spread_len);
+ __ SubP(argc, argc, Operand(1));
+
+ // Pop the spread argument off the stack.
+ __ Pop(scratch);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (e.g. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already have
+ // overflowed here, which will cause scratch to become negative.
+ __ SubP(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftP(r0, spread_len, Operand(kPointerSizeLog2));
+ __ CmpP(scratch, r0);
+ __ bgt(&done); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // Put the evaluated spread onto the stack as additional arguments.
+ {
+ __ LoadImmP(scratch, Operand::Zero());
+ Label done, push, loop;
+ __ bind(&loop);
+ __ CmpP(scratch, spread_len);
+ __ beq(&done);
+ __ ShiftLeftP(r0, scratch, Operand(kPointerSizeLog2));
+ __ AddP(scratch2, spread, r0);
+ __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+ __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+ __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(scratch2);
+ __ AddP(scratch, scratch, Operand(1));
+ __ b(&loop);
+ __ bind(&done);
+ }
+}
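
Condensed, the fast path above admits a spread only when it is a genuine JSArray with the original ArrayPrototype, the array-iterator protector is intact, and the elements kind is fast; packed SMI/object kinds may additionally skip the array protector because they cannot contain holes. A sketch of that decision tree, with a reduced enum standing in for V8's ElementsKind ordering:

  #include <cstdio>

  enum ElementsKind {  // reduced stand-in; only the ordering matters here
    FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
    FAST_ELEMENTS, FAST_HOLEY_ELEMENTS, DICTIONARY_ELEMENTS
  };

  bool SpreadFastPathAllowed(bool is_js_array, bool original_prototype,
                             bool iterator_protector_ok, ElementsKind kind,
                             bool array_protector_ok) {
    if (!is_js_array || !original_prototype || !iterator_protector_ok)
      return false;                                // -> runtime_call
    if (kind > FAST_HOLEY_ELEMENTS) return false;  // not a fast kind
    if (kind == FAST_SMI_ELEMENTS || kind == FAST_ELEMENTS)
      return true;                                 // packed: holes impossible
    return array_protector_ok;  // holey: holes must still become undefined
  }

  int main() {
    // Either way, the push path then does argc += spread_len - 1: the spread
    // argument itself is popped and replaced by its expanded elements.
    std::printf("%d\n", SpreadFastPathAllowed(true, true, true,
                                              FAST_HOLEY_ELEMENTS, true));
  }
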
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the constructor to call (can be any Object)
+ // -----------------------------------
+
+ // CheckSpreadAndPushToStack will push r5 to save it.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow),
+ RelocInfo::CODE_TARGET);
+}
+
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2818,6 +3071,18 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the constructor to call (can be any Object)
+ // -- r5 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 88c928b4cd..703a7e7aa8 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -6,8 +6,10 @@
#include "src/code-factory.h"
#include "src/codegen.h"
+#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -117,6 +119,8 @@ namespace {
void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
+ Label post_instantiation_deopt_entry;
+
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rsi: context
@@ -161,6 +165,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ PushRoot(Heap::kTheHoleValueRootIndex);
}
+ // Deoptimizer re-enters stub code here.
+ __ bind(&post_instantiation_deopt_entry);
+
// Set up pointer to last argument.
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
@@ -181,7 +188,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
}
// Restore context from the frame.
@@ -240,6 +248,35 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ IncrementCounter(counters->constructed_objects(), 1);
}
__ ret(0);
+
+ // Store offset of trampoline address for deoptimizer. This is the bailout
+ // point after the receiver instantiation but before the function invocation.
+ // We need to restore some registers in order to continue the above code.
+ if (create_implicit_receiver && !is_api_function) {
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+
+ // ----------- S t a t e -------------
+ // -- rax : newly allocated object
+ // -- rsp[0] : constructor function
+ // -----------------------------------
+
+ __ Pop(rdi);
+ __ Push(rax);
+ __ Push(rax);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ SmiToInteger32(rax,
+ Operand(rbp, ConstructFrameConstants::kLengthOffset));
+
+ // Retrieve the new target value from the stack. This was placed into the
+ // frame description in place of the receiver by the optimizing compiler.
+ __ movp(rdx, Operand(rbp, rax, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+
+ // Continue with constructor function invocation.
+ __ jmp(&post_instantiation_deopt_entry);
+ }
}
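
The trampoline offset recorded above lets the deoptimizer resume construction between receiver allocation and constructor invocation. The fix-up is mostly stack surgery: the optimizing compiler left the constructor where the receiver belongs, so the stub swaps in the freshly allocated object twice (implicit return value slot plus receiver) before rejoining the normal path. A toy model of that swap:

  #include <cassert>
  #include <vector>

  // The top-of-stack slot holds the constructor; replace it with two copies
  // of the new object, as the Pop(rdi)/Push(rax)/Push(rax) sequence does.
  void ReenterAfterInstantiation(std::vector<const char*>& stack,
                                 const char* new_object) {
    stack.pop_back();             // Pop(rdi): the constructor function
    stack.push_back(new_object);  // Push(rax): implicit return value slot
    stack.push_back(new_object);  // Push(rax): receiver for the invocation
  }

  int main() {
    std::vector<const char*> stack = {"constructor"};
    ReenterAfterInstantiation(stack, "new_object");
    assert(stack.size() == 2);
  }
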
} // namespace
@@ -467,7 +504,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
Operand debug_hook_operand = masm->ExternalOperand(debug_hook);
- STATIC_ASSERT(StepFrame > StepIn);
__ cmpb(debug_hook_operand, Immediate(0));
__ j(not_equal, &prepare_step_in_if_stepping);
@@ -610,10 +646,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
Label load_debug_bytecode_array, bytecode_array_loaded;
- DCHECK_EQ(Smi::kZero, DebugInfo::uninitialized());
- __ cmpp(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
- Immediate(0));
- __ j(not_equal, &load_debug_bytecode_array);
+ __ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
+ &load_debug_bytecode_array);
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
@@ -625,8 +659,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ movp(rcx, FieldOperand(rcx, LiteralsArray::kFeedbackVectorOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
+ __ movp(rcx, FieldOperand(rcx, Cell::kValueOffset));
__ SmiAddConstant(
FieldOperand(rcx, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize),
@@ -722,24 +756,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(rcx);
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow) {
+static void Generate_StackOverflowCheck(
+ MacroAssembler* masm, Register num_args, Register scratch,
+ Label* stack_overflow,
+ Label::Distance stack_overflow_distance = Label::kFar) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex);
- __ movp(scratch2, rsp);
- // Make scratch2 the space we have left. The stack might already be overflowed
- // here which will cause scratch2 to become negative.
- __ subp(scratch2, scratch1);
- // Make scratch1 the space we need for the array when it is unrolled onto the
- // stack.
- __ movp(scratch1, num_args);
- __ shlp(scratch1, Immediate(kPointerSizeLog2));
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movp(scratch, rsp);
+ // Make scratch the space we have left. The stack might already have
+ // overflowed here, which will cause scratch to become negative.
+ __ subp(scratch, kScratchRegister);
+ __ sarp(scratch, Immediate(kPointerSizeLog2));
// Check if the arguments will overflow the stack.
- __ cmpp(scratch2, scratch1);
- __ j(less_equal, stack_overflow); // Signed comparison.
+ __ cmpp(scratch, num_args);
+ // Signed comparison.
+ __ j(less_equal, stack_overflow, stack_overflow_distance);
}
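
The rewritten check needs one scratch register instead of two: it computes the free space in words, (sp - real_stack_limit) >> kPointerSizeLog2, and compares that directly against the argument count. A worked sketch of the arithmetic:

  #include <cstdint>
  #include <cstdio>

  constexpr int kPointerSizeLog2 = 3;  // 64-bit: 8-byte stack slots

  // True when pushing num_args slots would cross the real stack limit.
  bool WouldOverflow(uintptr_t sp, uintptr_t real_stack_limit,
                     intptr_t num_args) {
    // (sp - limit) may be "negative" if we already overflowed; keep the math
    // signed, exactly as the stub does with sarp after subp.
    intptr_t free_words =
        static_cast<intptr_t>(sp - real_stack_limit) >> kPointerSizeLog2;
    return free_words <= num_args;  // j(less_equal, stack_overflow)
  }

  int main() {
    // 0x100 bytes free = 32 words; 32 <= 40, so 40 arguments overflow.
    std::printf("%d\n", WouldOverflow(0x8000, 0x7F00, 40));
  }
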
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -766,7 +799,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
- CallableType function_type) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rbx : the address of the first argument to be pushed. Subsequent
@@ -781,7 +814,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
__ addp(rcx, Immediate(1)); // Add one for receiver.
// Add a stack check before pushing arguments.
- Generate_StackOverflowCheck(masm, rcx, rdx, r8, &stack_overflow);
+ Generate_StackOverflowCheck(masm, rcx, rdx, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
@@ -792,12 +825,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
- if (function_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@@ -814,7 +849,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
- MacroAssembler* masm, CallableType construct_type) {
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdx : the new target (either the same as the constructor or
@@ -828,7 +863,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
Label stack_overflow;
// Add a stack check before pushing arguments.
- Generate_StackOverflowCheck(masm, rax, r8, r9, &stack_overflow);
+ Generate_StackOverflowCheck(masm, rax, r8, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
@@ -843,7 +878,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ PushReturnAddressFrom(kScratchRegister);
__ AssertUndefinedOrAllocationSite(rbx);
- if (construct_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ AssertFunction(rdi);
@@ -853,8 +888,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
// Jump to the constructor function (rax, rbx, rdx passed on).
__ jmp(rcx);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor (rax, rdx, rdi passed on).
+ __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(construct_type, CallableType::kAny);
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor (rax, rdx, rdi passed on).
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -886,7 +925,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
__ addp(r8, Immediate(1)); // Add one for receiver.
// Add a stack check before pushing arguments.
- Generate_StackOverflowCheck(masm, r8, rdi, r9, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r8, rdi, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
@@ -993,13 +1032,18 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register closure = rdi;
Register map = r8;
Register index = r9;
+
+ // Do we have a valid feedback vector?
+ __ movp(rbx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
+ __ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+
__ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset));
__ cmpl(index, Immediate(2));
- __ j(less, &gotta_call_runtime);
+ __ j(less, &try_shared);
- // Find literals.
// r14 : native context
// r9 : length / index
// r8 : optimized code map
@@ -1016,17 +1060,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmpp(temp, native_context);
__ j(not_equal, &loop_bottom);
- // Literals available?
- __ movp(temp, FieldOperand(map, index, times_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousLiterals));
- __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
- __ JumpIfSmi(temp, &gotta_call_runtime);
-
- // Save the literals in the closure.
- __ movp(FieldOperand(closure, JSFunction::kLiteralsOffset), temp);
- __ movp(r15, index);
- __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r15,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Code available?
Register entry = rcx;
@@ -1035,7 +1068,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found literals and code. Get them into the closure and return.
+ // Found code. Get it into the closure and return.
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, r15);
@@ -1066,9 +1099,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ cmpl(index, Immediate(1));
__ j(greater, &loop_top);
- // We found neither literals nor code.
- __ jmp(&gotta_call_runtime);
-
+ // We found no code.
__ bind(&try_shared);
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
@@ -1581,7 +1612,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
{
StackArgumentsAccessor args(rsp, 0);
__ movp(args.GetReceiverOperand(), rdi);
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
@@ -1589,7 +1620,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
{
StackArgumentsAccessor args(rsp, 0);
__ movp(args.GetReceiverOperand(), rdx);
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
@@ -2052,7 +2083,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ movp(rbp, rsp);
// Store the arguments adaptor context sentinel.
- __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Push the function on the stack.
__ Push(rdi);
@@ -2143,7 +2174,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
// The registers rcx and r8 will be modified. The register rbx is only read.
- Generate_StackOverflowCheck(masm, rbx, rcx, r8, &stack_overflow);
+ Generate_StackOverflowCheck(masm, rbx, rcx, &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2165,7 +2196,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
// The registers rcx and r8 will be modified. The register rbx is only read.
- Generate_StackOverflowCheck(masm, rbx, rcx, r8, &stack_overflow);
+ Generate_StackOverflowCheck(masm, rbx, rcx, &stack_overflow);
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2372,6 +2403,72 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- rdi : the target to call (can be any Object)
+ // -- rcx : start index (to support rest parameters)
+ // -- rsp[0] : return address.
+ // -- rsp[8] : thisArgument
+ // -----------------------------------
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ cmpp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
+ Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &arguments_adaptor, Label::kNear);
+ {
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(
+ rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movp(rbx, rbp);
+ }
+ __ jmp(&arguments_done, Label::kNear);
+ __ bind(&arguments_adaptor);
+ {
+ __ SmiToInteger32(
+ rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ }
+ __ bind(&arguments_done);
+
+ Label stack_empty, stack_done, stack_overflow;
+ __ subl(rax, rcx);
+ __ j(less_equal, &stack_empty);
+ {
+ // Check for stack overflow.
+ Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
+
+ // Forward the arguments from the caller frame.
+ {
+ Label loop;
+ __ movl(rcx, rax);
+ __ Pop(r8);
+ __ bind(&loop);
+ {
+ StackArgumentsAccessor args(rbx, rcx, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ Push(args.GetArgumentOperand(0));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
+ }
+ __ Push(r8);
+ }
+ }
+ __ jmp(&stack_done, Label::kNear);
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&stack_empty);
+ {
+ // We just pass the receiver, which is already on the stack.
+ __ Set(rax, 0);
+ }
+ __ bind(&stack_done);
+
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
namespace {
// Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2420,8 +2517,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ Cmp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Smi::FromInt(StackFrame::STUB));
+ __ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
+ Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
__ j(not_equal, &no_interpreter_frame, Label::kNear);
__ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
@@ -2431,8 +2528,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ cmpp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
+ Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &no_arguments_adaptor, Label::kNear);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -2758,6 +2855,148 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+ Label runtime_call, push_args;
+ // Load the spread argument into rbx.
+ __ movp(rbx, Operand(rsp, kPointerSize));
+ __ JumpIfSmi(rbx, &runtime_call);
+ // Load the map of the spread into r15.
+ __ movp(r15, FieldOperand(rbx, HeapObject::kMapOffset));
+ // Load native context into r14.
+ __ movp(r14, NativeContextOperand());
+
+ // Check that the spread is an array.
+ __ CmpInstanceType(r15, JS_ARRAY_TYPE);
+ __ j(not_equal, &runtime_call);
+
+ // Check that we have the original ArrayPrototype.
+ __ movp(rcx, FieldOperand(r15, Map::kPrototypeOffset));
+ __ cmpp(rcx, ContextOperand(r14, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ j(not_equal, &runtime_call);
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ __ LoadRoot(rcx, Heap::kArrayIteratorProtectorRootIndex);
+ __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
+ Smi::FromInt(Isolate::kProtectorValid));
+ __ j(not_equal, &runtime_call);
+
+ // Check that the map of the initial array iterator hasn't changed.
+ __ movp(rcx,
+ ContextOperand(r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ cmpp(rcx, ContextOperand(
+ r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, &runtime_call);
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ Label no_protector_check;
+ __ movzxbp(rcx, FieldOperand(r15, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(rcx);
+ __ cmpp(rcx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ j(above, &runtime_call);
+ // For non-FastHoley kinds, we can skip the protector check.
+ __ cmpp(rcx, Immediate(FAST_SMI_ELEMENTS));
+ __ j(equal, &no_protector_check);
+ __ cmpp(rcx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &no_protector_check);
+ // Check the ArrayProtector cell.
+ __ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
+ __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
+ Smi::FromInt(Isolate::kProtectorValid));
+ __ j(not_equal, &runtime_call);
+
+ __ bind(&no_protector_check);
+ // Load the FixedArray backing store, but use the length from the array.
+ __ SmiToInteger32(r9, FieldOperand(rbx, JSArray::kLengthOffset));
+ __ movp(rbx, FieldOperand(rbx, JSArray::kElementsOffset));
+ __ jmp(&push_args);
+
+ __ bind(&runtime_call);
+ {
+ // Call the builtin for the result of the spread.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi); // target
+ __ Push(rdx); // new target
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax); // nargs
+ __ Push(rbx);
+ __ CallRuntime(Runtime::kSpreadIterableFixed);
+ __ movp(rbx, rax);
+ __ Pop(rax); // nargs
+ __ SmiToInteger32(rax, rax);
+ __ Pop(rdx); // new target
+ __ Pop(rdi); // target
+ }
+
+ {
+ // Calculate the new nargs including the result of the spread.
+ __ SmiToInteger32(r9, FieldOperand(rbx, FixedArray::kLengthOffset));
+
+ __ bind(&push_args);
+ // rax += r9 - 1. Subtract 1 for the spread itself.
+ __ leap(rax, Operand(rax, r9, times_1, -1));
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (e.g. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movp(rcx, rsp);
+ // Make rcx the space we have left. The stack might already have overflowed
+ // here, which will cause rcx to become negative.
+ __ subp(rcx, kScratchRegister);
+ __ sarp(rcx, Immediate(kPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ __ cmpp(rcx, r9);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // Put the evaluated spread onto the stack as additional arguments.
+ {
+ // Pop the return address and spread argument.
+ __ PopReturnAddressTo(r8);
+ __ Pop(rcx);
+
+ __ Set(rcx, 0);
+ Label done, push, loop;
+ __ bind(&loop);
+ __ cmpl(rcx, r9);
+ __ j(equal, &done, Label::kNear);
+ __ movp(kScratchRegister, FieldOperand(rbx, rcx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(kScratchRegister);
+ __ incl(rcx);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ PushReturnAddressFrom(r8);
+ }
+}
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdi : the target to call (can be any Object)
+ // -----------------------------------
+
+ // CheckSpreadAndPushToStack will push rdx to save it.
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow),
+ RelocInfo::CODE_TARGET);
+}
+
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2880,6 +3119,19 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- rdi : the constructor to call (can be any Object)
+ // -----------------------------------
+
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
Register scratch0, Register scratch1,
diff --git a/deps/v8/src/builtins/x87/builtins-x87.cc b/deps/v8/src/builtins/x87/builtins-x87.cc
index f5acc78415..d13e868b02 100644
--- a/deps/v8/src/builtins/x87/builtins-x87.cc
+++ b/deps/v8/src/builtins/x87/builtins-x87.cc
@@ -535,9 +535,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
Label load_debug_bytecode_array, bytecode_array_loaded;
- __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
- Immediate(DebugInfo::uninitialized()));
- __ j(not_equal, &load_debug_bytecode_array);
+ __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+ &load_debug_bytecode_array);
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
@@ -695,7 +694,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
- CallableType function_type) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ebx : the address of the first argument to be pushed. Subsequent
@@ -727,12 +726,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// Call the target.
__ Push(edx); // Re-push return address.
- if (function_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@@ -845,7 +846,7 @@ void Generate_InterpreterPushArgsAndReturnAddress(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
- MacroAssembler* masm, CallableType construct_type) {
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the new target
@@ -871,7 +872,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ Pop(edi);
__ AssertUndefinedOrAllocationSite(ebx);
- if (construct_type == CallableType::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kJSFunction) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ AssertFunction(edi);
@@ -880,9 +881,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
__ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
__ jmp(ecx);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with unmodified eax, edi, edx values.
+ __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+ RelocInfo::CODE_TARGET);
} else {
- DCHECK_EQ(construct_type, CallableType::kAny);
-
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with unmodified eax, edi, edx values.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -1025,6 +1029,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register new_target = edx;
Register argument_count = eax;
+ // Do we have a valid feedback vector?
+ __ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
+ __ cmp(ebx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &gotta_call_runtime_no_stack);
+
__ push(argument_count);
__ push(new_target);
__ push(closure);
@@ -1035,9 +1045,8 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
__ cmp(index, Immediate(Smi::FromInt(2)));
- __ j(less, &gotta_call_runtime);
+ __ j(less, &try_shared);
- // Find literals.
// edx : native context
// ebx : length / index
// eax : optimized code map
@@ -1055,20 +1064,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ j(not_equal, &loop_bottom);
- // Literals available?
- __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousLiterals));
- __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
- __ JumpIfSmi(temp, &gotta_call_runtime);
-
- // Save the literals in the closure.
- __ mov(ecx, Operand(esp, 0));
- __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
- __ push(index);
- __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ pop(index);
-
// Code available?
Register entry = ecx;
__ mov(entry, FieldOperand(map, index, times_half_pointer_size,
@@ -1076,7 +1071,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found literals and code. Get them into the closure and return.
+ // Found code. Get it into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
@@ -1110,7 +1105,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ cmp(index, Immediate(Smi::FromInt(1)));
__ j(greater, &loop_top);
- // We found neither literals nor code.
+ // We found no code.
__ jmp(&gotta_call_runtime);
__ bind(&try_shared);
@@ -1622,14 +1617,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&target_not_constructor);
{
__ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
__ mov(Operand(esp, kPointerSize), edx);
- __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ __ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
@@ -2699,6 +2694,199 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+ // Free up some registers.
+ // Save edx/edi to stX0/stX1.
+ __ push(edx);
+ __ push(edi);
+ __ fld_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, 4));
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+
+ Register argc = eax;
+
+ Register scratch = ecx;
+ Register scratch2 = edi;
+
+ Register spread = ebx;
+ Register spread_map = edx;
+
+ Register spread_len = edx;
+
+ Label runtime_call, push_args;
+ __ mov(spread, Operand(esp, kPointerSize));
+ __ JumpIfSmi(spread, &runtime_call);
+ __ mov(spread_map, FieldOperand(spread, HeapObject::kMapOffset));
+
+ // Check that the spread is an array.
+ __ CmpInstanceType(spread_map, JS_ARRAY_TYPE);
+ __ j(not_equal, &runtime_call);
+
+ // Check that we have the original ArrayPrototype.
+ __ mov(scratch, FieldOperand(spread_map, Map::kPrototypeOffset));
+ __ mov(scratch2, NativeContextOperand());
+ __ cmp(scratch,
+ ContextOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ j(not_equal, &runtime_call);
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+ __ j(not_equal, &runtime_call);
+
+ // Check that the map of the initial array iterator hasn't changed.
+ __ mov(scratch2, NativeContextOperand());
+ __ mov(scratch,
+ ContextOperand(scratch2,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ cmp(scratch,
+ ContextOperand(scratch2,
+ Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, &runtime_call);
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ Label no_protector_check;
+ __ mov(scratch, FieldOperand(spread_map, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(scratch);
+ __ cmp(scratch, Immediate(FAST_HOLEY_ELEMENTS));
+ __ j(above, &runtime_call);
+ // For non-FastHoley kinds, we can skip the protector check.
+ __ cmp(scratch, Immediate(FAST_SMI_ELEMENTS));
+ __ j(equal, &no_protector_check);
+ __ cmp(scratch, Immediate(FAST_ELEMENTS));
+ __ j(equal, &no_protector_check);
+ // Check the ArrayProtector cell.
+ __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+ __ j(not_equal, &runtime_call);
+
+ __ bind(&no_protector_check);
+ // Load the FixedArray backing store, but use the length from the array.
+ __ mov(spread_len, FieldOperand(spread, JSArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+ __ mov(spread, FieldOperand(spread, JSArray::kElementsOffset));
+ __ jmp(&push_args);
+
+ __ bind(&runtime_call);
+ {
+ // Call the builtin for the result of the spread.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // edx/edi need to be preserved on the machine stack across the runtime
+ // call, so restore them from stX0/stX1 first.
+ __ lea(esp, Operand(esp, -2 * kFloatSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fstp_s(MemOperand(esp, 4));
+ __ pop(edx);
+ __ pop(edi);
+
+ __ Push(edi);
+ __ Push(edx);
+ __ SmiTag(argc);
+ __ Push(argc);
+ __ Push(spread);
+ __ CallRuntime(Runtime::kSpreadIterableFixed);
+ __ mov(spread, eax);
+ __ Pop(argc);
+ __ SmiUntag(argc);
+ __ Pop(edx);
+ __ Pop(edi);
+ // Free up some registers.
+ // Save edx/edi to stX0/stX1.
+ __ push(edx);
+ __ push(edi);
+ __ fld_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, 4));
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ }
+
+ {
+ // Calculate the new nargs including the result of the spread.
+ __ mov(spread_len, FieldOperand(spread, FixedArray::kLengthOffset));
+ __ SmiUntag(spread_len);
+
+ __ bind(&push_args);
+ // argc += spread_len - 1. Subtract 1 for the spread itself.
+ __ lea(argc, Operand(argc, spread_len, times_1, -1));
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (e.g. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already have
+ // overflowed here, which will cause scratch to become negative.
+ __ neg(scratch);
+ __ add(scratch, esp);
+ __ sar(scratch, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(scratch, spread_len);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // Put the evaluated spread onto the stack as additional arguments.
+ {
+ Register return_address = edi;
+ // Pop the return address and spread argument.
+ __ PopReturnAddressTo(return_address);
+ __ Pop(scratch);
+
+ Register scratch2 = esi;
+ // Save esi to stX0; edx/edi are in stX1/stX2 now.
+ __ push(esi);
+ __ fld_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, 1 * kFloatSize));
+
+ __ mov(scratch, Immediate(0));
+ Label done, push, loop;
+ __ bind(&loop);
+ __ cmp(scratch, spread_len);
+ __ j(equal, &done, Label::kNear);
+ __ mov(scratch2, FieldOperand(spread, scratch, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+ __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(scratch2);
+ __ inc(scratch);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ PushReturnAddressFrom(return_address);
+
+ // Now restore esi from stX0 and edx/edi from stX1/stX2.
+ __ lea(esp, Operand(esp, -3 * kFloatSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fstp_s(MemOperand(esp, 4));
+ __ fstp_s(MemOperand(esp, 8));
+ __ pop(esi);
+ __ pop(edx);
+ __ pop(edi);
+ }
+}
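
Register pressure on ia32 leaves no spare GPRs here, so the code parks edx/edi (and later esi) on the x87 register stack: push to memory, fld_s to pull the raw bits into st0, and the mirror-image fstp_s/pop sequence to get them back. The essential invariant is the LIFO pairing, sketched below (a model of the ordering only, not of the float-bit reinterpretation):

  #include <cassert>
  #include <cstdint>
  #include <stack>

  using X87Stack = std::stack<uint32_t>;  // top of the stack models st0

  void Spill(X87Stack& st, uint32_t edx, uint32_t edi) {
    st.push(edi);  // first fld_s: edi -> st0
    st.push(edx);  // second fld_s: edx -> st0, edi now in st1
  }

  void Restore(X87Stack& st, uint32_t& edx, uint32_t& edi) {
    edx = st.top(); st.pop();  // first fstp_s pops st0 (edx)
    edi = st.top(); st.pop();  // second fstp_s pops the old st1 (edi)
  }

  int main() {
    X87Stack st;
    uint32_t edx = 0, edi = 0;
    Spill(st, 1, 2);
    Restore(st, edx, edi);
    assert(edx == 1 && edi == 2);  // round-trips in the right order
  }
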
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the target to call (can be any Object)
+ // -----------------------------------
+
+ // CheckSpreadAndPushToStack will push edx to save it.
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow),
+ RelocInfo::CODE_TARGET);
+}
+
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2822,6 +3010,19 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- edi : the constructor to call (can be any Object)
+ // -----------------------------------
+
+ CheckSpreadAndPushToStack(masm);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : requested object size (untagged)
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index ab652964c6..3ebfad0e7f 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -6,6 +6,7 @@
#include "src/bootstrapper.h"
#include "src/ic/ic.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -34,6 +35,16 @@ Callable CodeFactory::LoadIC(Isolate* isolate) {
}
// static
+Callable CodeFactory::LoadICProtoArray(Isolate* isolate,
+ bool throw_if_nonexistent) {
+ return Callable(
+ throw_if_nonexistent
+ ? isolate->builtins()->LoadICProtoArrayThrowIfNonexistent()
+ : isolate->builtins()->LoadICProtoArray(),
+ LoadICProtoArrayDescriptor(isolate));
+}
+
+// static
Callable CodeFactory::ApiGetter(Isolate* isolate) {
CallApiGetterStub stub(isolate);
return make_callable(stub);
@@ -76,23 +87,17 @@ Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
}
// static
-Callable CodeFactory::KeyedLoadIC_Megamorphic(Isolate* isolate) {
- return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic_TF(),
- LoadWithVectorDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::CallIC(Isolate* isolate, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
- CallICTrampolineStub stub(isolate, CallICState(mode, tail_call_mode));
+ CallICStub stub(isolate, mode, tail_call_mode);
return make_callable(stub);
}
// static
-Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- CallICStub stub(isolate, CallICState(mode, tail_call_mode));
+Callable CodeFactory::CallICTrampoline(Isolate* isolate,
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ CallICTrampolineStub stub(isolate, mode, tail_call_mode);
return make_callable(stub);
}
@@ -112,6 +117,20 @@ Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
StoreWithVectorDescriptor(isolate));
}
+Callable CodeFactory::StoreOwnIC(Isolate* isolate) {
+ // TODO(ishell): Currently we use StoreOwnIC only for storing properties that
+ // already exist in the boilerplate therefore we can use StoreIC.
+ return Callable(isolate->builtins()->StoreICStrictTrampoline(),
+ StoreDescriptor(isolate));
+}
+
+Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
+ // TODO(ishell): Currently we use StoreOwnIC only for storing properties that
+ // already exist in the boilerplate therefore we can use StoreIC.
+ return Callable(isolate->builtins()->StoreICStrict(),
+ StoreWithVectorDescriptor(isolate));
+}
+
// static
Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
LanguageMode language_mode) {
@@ -133,11 +152,10 @@ Callable CodeFactory::KeyedStoreICInOptimizedCode(Isolate* isolate,
// static
Callable CodeFactory::KeyedStoreIC_Megamorphic(Isolate* isolate,
LanguageMode language_mode) {
- return Callable(
- language_mode == STRICT
- ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict_TF()
- : isolate->builtins()->KeyedStoreIC_Megamorphic_TF(),
- StoreWithVectorDescriptor(isolate));
+ return Callable(language_mode == STRICT
+ ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
+ : isolate->builtins()->KeyedStoreIC_Megamorphic(),
+ StoreWithVectorDescriptor(isolate));
}
// static
@@ -159,36 +177,6 @@ Callable CodeFactory::GetProperty(Isolate* isolate) {
}
// static
-Callable CodeFactory::ToBoolean(Isolate* isolate) {
- return Callable(isolate->builtins()->ToBoolean(),
- TypeConversionDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ToNumber(Isolate* isolate) {
- return Callable(isolate->builtins()->ToNumber(),
- TypeConversionDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::NonNumberToNumber(Isolate* isolate) {
- return Callable(isolate->builtins()->NonNumberToNumber(),
- TypeConversionDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringToNumber(Isolate* isolate) {
- return Callable(isolate->builtins()->StringToNumber(),
- TypeConversionDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ToName(Isolate* isolate) {
- return Callable(isolate->builtins()->ToName(),
- TypeConversionDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::NonPrimitiveToPrimitive(Isolate* isolate,
ToPrimitiveHint hint) {
return Callable(isolate->builtins()->NonPrimitiveToPrimitive(hint),
@@ -220,7 +208,7 @@ Callable CodeFactory::StringFromCharCode(Isolate* isolate) {
return Callable(code, BuiltinDescriptor(isolate));
}
-#define DECLARE_TFS(Name, Kind, Extra, InterfaceDescriptor) \
+#define DECLARE_TFS(Name, Kind, Extra, InterfaceDescriptor, result_size) \
typedef InterfaceDescriptor##Descriptor Name##Descriptor;
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TFS,
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
@@ -254,19 +242,40 @@ TFS_BUILTIN(StrictEqual)
TFS_BUILTIN(StrictNotEqual)
TFS_BUILTIN(CreateIterResultObject)
TFS_BUILTIN(HasProperty)
+TFS_BUILTIN(NonNumberToNumber)
+TFS_BUILTIN(StringToNumber)
+TFS_BUILTIN(ToBoolean)
TFS_BUILTIN(ToInteger)
TFS_BUILTIN(ToLength)
+TFS_BUILTIN(ToName)
+TFS_BUILTIN(ToNumber)
TFS_BUILTIN(ToObject)
+TFS_BUILTIN(ClassOf)
TFS_BUILTIN(Typeof)
TFS_BUILTIN(InstanceOf)
TFS_BUILTIN(OrdinaryHasInstance)
-TFS_BUILTIN(ForInFilter)
+TFS_BUILTIN(CopyFastSmiOrObjectElements)
+TFS_BUILTIN(GrowFastDoubleElements)
+TFS_BUILTIN(GrowFastSmiOrObjectElements)
TFS_BUILTIN(NewUnmappedArgumentsElements)
TFS_BUILTIN(NewRestParameterElements)
-TFS_BUILTIN(PromiseHandleReject)
+TFS_BUILTIN(FastCloneRegExp)
+TFS_BUILTIN(FastNewClosure)
+TFS_BUILTIN(FastNewObject)
+TFS_BUILTIN(ForInFilter)
TFS_BUILTIN(GetSuperConstructor)
+TFS_BUILTIN(KeyedLoadIC_Megamorphic)
+TFS_BUILTIN(PromiseHandleReject)
+TFS_BUILTIN(RegExpReplace)
+TFS_BUILTIN(RegExpSplit)
TFS_BUILTIN(StringCharAt)
TFS_BUILTIN(StringCharCodeAt)
+TFS_BUILTIN(StringEqual)
+TFS_BUILTIN(StringNotEqual)
+TFS_BUILTIN(StringLessThan)
+TFS_BUILTIN(StringLessThanOrEqual)
+TFS_BUILTIN(StringGreaterThan)
+TFS_BUILTIN(StringGreaterThanOrEqual)
#undef TFS_BUILTIN
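
The conversion and comparison helpers deleted further down all shared one shape (a builtin's code object paired with its call interface descriptor), which is why they collapse into this TFS_BUILTIN list. A self-contained illustration of what each line plausibly expands to; the types here are stand-ins and the exact macro body is an assumption, though the surviving hand-written accessors such as StringIndexOf follow the same pattern:

  #include <iostream>
  #include <string>

  // Minimal stand-ins for v8's Callable = {code, call interface descriptor}.
  struct Code { std::string name; };
  struct Descriptor { std::string layout; };
  struct Callable { Code code; Descriptor descriptor; };

  // What each TFS_BUILTIN(Name) line plausibly expands to: one accessor
  // pairing the builtin's code with its matching descriptor (assumption).
  #define TFS_BUILTIN(Name) \
    Callable Name() { return {{#Name}, {#Name "Descriptor"}}; }

  TFS_BUILTIN(ToBoolean)
  TFS_BUILTIN(StringEqual)
  #undef TFS_BUILTIN

  int main() { std::cout << ToBoolean().code.name << "\n"; }
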
@@ -302,39 +311,9 @@ Callable CodeFactory::StringCompare(Isolate* isolate, Token::Value token) {
}
// static
-Callable CodeFactory::StringEqual(Isolate* isolate) {
- return Callable(isolate->builtins()->StringEqual(),
- CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringNotEqual(Isolate* isolate) {
- return Callable(isolate->builtins()->StringNotEqual(),
- CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringLessThan(Isolate* isolate) {
- return Callable(isolate->builtins()->StringLessThan(),
- CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringLessThanOrEqual(Isolate* isolate) {
- return Callable(isolate->builtins()->StringLessThanOrEqual(),
- CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringGreaterThan(Isolate* isolate) {
- return Callable(isolate->builtins()->StringGreaterThan(),
- CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringGreaterThanOrEqual(Isolate* isolate) {
- return Callable(isolate->builtins()->StringGreaterThanOrEqual(),
- CompareDescriptor(isolate));
+Callable CodeFactory::StringIndexOf(Isolate* isolate) {
+ return Callable(isolate->builtins()->StringIndexOf(),
+ StringIndexOfDescriptor(isolate));
}
// static
@@ -350,9 +329,15 @@ Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
}
// static
-Callable CodeFactory::FastCloneRegExp(Isolate* isolate) {
- return Callable(isolate->builtins()->FastCloneRegExp(),
- FastCloneRegExpDescriptor(isolate));
+Callable CodeFactory::FrameDropperTrampoline(Isolate* isolate) {
+ return Callable(isolate->builtins()->FrameDropperTrampoline(),
+ FrameDropperTrampolineDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::HandleDebuggerStatement(Isolate* isolate) {
+ return Callable(isolate->builtins()->HandleDebuggerStatement(),
+ ContextOnlyDescriptor(isolate));
}
// static
@@ -376,54 +361,33 @@ Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
}
// static
-Callable CodeFactory::FastNewClosure(Isolate* isolate) {
- return Callable(isolate->builtins()->FastNewClosure(),
- FastNewClosureDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::FastNewObject(Isolate* isolate) {
- return Callable(isolate->builtins()->FastNewObject(),
- FastNewObjectDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::FastNewRestParameter(Isolate* isolate,
- bool skip_stub_frame) {
- FastNewRestParameterStub stub(isolate, skip_stub_frame);
- return make_callable(stub);
+Callable CodeFactory::FastNewRestParameter(Isolate* isolate) {
+ return Callable(isolate->builtins()->FastNewRestParameter(),
+ FastNewRestParameterDescriptor(isolate));
}
// static
-Callable CodeFactory::FastNewSloppyArguments(Isolate* isolate,
- bool skip_stub_frame) {
- FastNewSloppyArgumentsStub stub(isolate, skip_stub_frame);
- return make_callable(stub);
+Callable CodeFactory::FastNewSloppyArguments(Isolate* isolate) {
+ return Callable(isolate->builtins()->FastNewSloppyArguments(),
+ FastNewRestParameterDescriptor(isolate));
}
// static
-Callable CodeFactory::FastNewStrictArguments(Isolate* isolate,
- bool skip_stub_frame) {
- FastNewStrictArgumentsStub stub(isolate, skip_stub_frame);
- return make_callable(stub);
+Callable CodeFactory::FastNewStrictArguments(Isolate* isolate) {
+ return Callable(isolate->builtins()->FastNewStrictArguments(),
+ FastNewRestParameterDescriptor(isolate));
}
// static
-Callable CodeFactory::CopyFastSmiOrObjectElements(Isolate* isolate) {
- return Callable(isolate->builtins()->CopyFastSmiOrObjectElements(),
- CopyFastSmiOrObjectElementsDescriptor(isolate));
+Callable CodeFactory::ForInPrepare(Isolate* isolate) {
+ return Callable(isolate->builtins()->ForInPrepare(),
+ ForInPrepareDescriptor(isolate));
}
// static
-Callable CodeFactory::GrowFastDoubleElements(Isolate* isolate) {
- return Callable(isolate->builtins()->GrowFastDoubleElements(),
- GrowArrayElementsDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::GrowFastSmiOrObjectElements(Isolate* isolate) {
- return Callable(isolate->builtins()->GrowFastSmiOrObjectElements(),
- GrowArrayElementsDescriptor(isolate));
+Callable CodeFactory::ForInNext(Isolate* isolate) {
+ return Callable(isolate->builtins()->ForInNext(),
+ ForInNextDescriptor(isolate));
}
// static
@@ -432,14 +396,6 @@ Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
return make_callable(stub);
}
-#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
- Callable CodeFactory::Allocate##Type(Isolate* isolate) { \
- Allocate##Type##Stub stub(isolate); \
- return make_callable(stub); \
- }
-SIMD128_TYPES(SIMD128_ALLOC)
-#undef SIMD128_ALLOC
-
// static
Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
return Callable(isolate->builtins()->ArgumentsAdaptorTrampoline(),
@@ -454,18 +410,43 @@ Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode,
}
// static
-Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode) {
- return Callable(isolate->builtins()->CallFunction(mode),
+Callable CodeFactory::CallWithSpread(Isolate* isolate) {
+ return Callable(isolate->builtins()->CallWithSpread(),
+ CallTrampolineDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ return Callable(isolate->builtins()->CallFunction(mode, tail_call_mode),
CallTrampolineDescriptor(isolate));
}
// static
+Callable CodeFactory::CallForwardVarargs(Isolate* isolate) {
+ return Callable(isolate->builtins()->CallForwardVarargs(),
+ CallForwardVarargsDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::CallFunctionForwardVarargs(Isolate* isolate) {
+ return Callable(isolate->builtins()->CallFunctionForwardVarargs(),
+ CallForwardVarargsDescriptor(isolate));
+}
+
+// static
Callable CodeFactory::Construct(Isolate* isolate) {
return Callable(isolate->builtins()->Construct(),
ConstructTrampolineDescriptor(isolate));
}
// static
+Callable CodeFactory::ConstructWithSpread(Isolate* isolate) {
+ return Callable(isolate->builtins()->ConstructWithSpread(),
+ ConstructTrampolineDescriptor(isolate));
+}
+
+// static
Callable CodeFactory::ConstructFunction(Isolate* isolate) {
return Callable(isolate->builtins()->ConstructFunction(),
ConstructTrampolineDescriptor(isolate));
@@ -474,18 +455,17 @@ Callable CodeFactory::ConstructFunction(Isolate* isolate) {
// static
Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate,
TailCallMode tail_call_mode,
- CallableType function_type) {
- return Callable(isolate->builtins()->InterpreterPushArgsAndCall(
- tail_call_mode, function_type),
- InterpreterPushArgsAndCallDescriptor(isolate));
+ InterpreterPushArgsMode mode) {
+ return Callable(
+ isolate->builtins()->InterpreterPushArgsAndCall(tail_call_mode, mode),
+ InterpreterPushArgsAndCallDescriptor(isolate));
}
// static
Callable CodeFactory::InterpreterPushArgsAndConstruct(
- Isolate* isolate, CallableType function_type) {
- return Callable(
- isolate->builtins()->InterpreterPushArgsAndConstruct(function_type),
- InterpreterPushArgsAndConstructDescriptor(isolate));
+ Isolate* isolate, InterpreterPushArgsMode mode) {
+ return Callable(isolate->builtins()->InterpreterPushArgsAndConstruct(mode),
+ InterpreterPushArgsAndConstructDescriptor(isolate));
}
// static
@@ -509,6 +489,12 @@ Callable CodeFactory::InterpreterOnStackReplacement(Isolate* isolate) {
}
// static
+Callable CodeFactory::ArrayConstructor(Isolate* isolate) {
+ ArrayConstructorStub stub(isolate);
+ return make_callable(stub);
+}
+
+// static
Callable CodeFactory::ArrayPush(Isolate* isolate) {
return Callable(isolate->builtins()->ArrayPush(), BuiltinDescriptor(isolate));
}
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 2500ebd274..d50c7f44f9 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -39,6 +39,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
// Initial states for ICs.
static Callable LoadIC(Isolate* isolate);
static Callable LoadICInOptimizedCode(Isolate* isolate);
+ static Callable LoadICProtoArray(Isolate* isolate, bool throw_if_nonexistent);
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
static Callable LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode);
@@ -48,11 +49,13 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable CallIC(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
- static Callable CallICInOptimizedCode(
+ static Callable CallICTrampoline(
Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
static Callable StoreIC(Isolate* isolate, LanguageMode mode);
static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode);
+ static Callable StoreOwnIC(Isolate* isolate);
+ static Callable StoreOwnICInOptimizedCode(Isolate* isolate);
static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
static Callable KeyedStoreICInOptimizedCode(Isolate* isolate,
LanguageMode mode);
@@ -60,6 +63,9 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ResumeGenerator(Isolate* isolate);
+ static Callable FrameDropperTrampoline(Isolate* isolate);
+ static Callable HandleDebuggerStatement(Isolate* isolate);
+
static Callable CompareIC(Isolate* isolate, Token::Value op);
static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
@@ -126,7 +132,12 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable StringGreaterThan(Isolate* isolate);
static Callable StringGreaterThanOrEqual(Isolate* isolate);
static Callable SubString(Isolate* isolate);
+ static Callable StringIndexOf(Isolate* isolate);
+
+ static Callable RegExpReplace(Isolate* isolate);
+ static Callable RegExpSplit(Isolate* isolate);
+ static Callable ClassOf(Isolate* isolate);
static Callable Typeof(Isolate* isolate);
static Callable GetSuperConstructor(Isolate* isolate);
@@ -139,12 +150,12 @@ class V8_EXPORT_PRIVATE CodeFactory final {
ScopeType scope_type);
static Callable FastNewClosure(Isolate* isolate);
static Callable FastNewObject(Isolate* isolate);
- static Callable FastNewRestParameter(Isolate* isolate,
- bool skip_stub_frame = false);
- static Callable FastNewSloppyArguments(Isolate* isolate,
- bool skip_stub_frame = false);
- static Callable FastNewStrictArguments(Isolate* isolate,
- bool skip_stub_frame = false);
+ static Callable FastNewRestParameter(Isolate* isolate);
+ static Callable FastNewSloppyArguments(Isolate* isolate);
+ static Callable FastNewStrictArguments(Isolate* isolate);
+
+ static Callable ForInPrepare(Isolate* isolate);
+ static Callable ForInNext(Isolate* isolate);
static Callable CopyFastSmiOrObjectElements(Isolate* isolate);
static Callable GrowFastDoubleElements(Isolate* isolate);
@@ -154,32 +165,34 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable NewRestParameterElements(Isolate* isolate);
static Callable AllocateHeapNumber(Isolate* isolate);
-#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
- static Callable Allocate##Type(Isolate* isolate);
- SIMD128_TYPES(SIMD128_ALLOC)
-#undef SIMD128_ALLOC
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ static Callable CallWithSpread(Isolate* isolate);
static Callable CallFunction(
- Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ static Callable CallForwardVarargs(Isolate* isolate);
+ static Callable CallFunctionForwardVarargs(Isolate* isolate);
static Callable Construct(Isolate* isolate);
+ static Callable ConstructWithSpread(Isolate* isolate);
static Callable ConstructFunction(Isolate* isolate);
static Callable CreateIterResultObject(Isolate* isolate);
static Callable HasProperty(Isolate* isolate);
static Callable ForInFilter(Isolate* isolate);
- static Callable InterpreterPushArgsAndCall(
- Isolate* isolate, TailCallMode tail_call_mode,
- CallableType function_type = CallableType::kAny);
- static Callable InterpreterPushArgsAndConstruct(
- Isolate* isolate, CallableType function_type = CallableType::kAny);
+ static Callable InterpreterPushArgsAndCall(Isolate* isolate,
+ TailCallMode tail_call_mode,
+ InterpreterPushArgsMode mode);
+ static Callable InterpreterPushArgsAndConstruct(Isolate* isolate,
+ InterpreterPushArgsMode mode);
static Callable InterpreterPushArgsAndConstructArray(Isolate* isolate);
static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
static Callable InterpreterOnStackReplacement(Isolate* isolate);
+ static Callable ArrayConstructor(Isolate* isolate);
static Callable ArrayPush(Isolate* isolate);
static Callable FunctionPrototypeBind(Isolate* isolate);
static Callable PromiseHandleReject(Isolate* isolate);
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 5c6a6145ba..e1ab040ab5 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -47,33 +47,36 @@ void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
const char* message, const char* file,
int line) {
#if defined(DEBUG)
- Label ok(this);
- Label not_ok(this, Label::kDeferred);
- if (message != nullptr && FLAG_code_comments) {
- Comment("[ Assert: %s", message);
- } else {
- Comment("[ Assert");
- }
- Node* condition = condition_body();
- DCHECK_NOT_NULL(condition);
- Branch(condition, &ok, &not_ok);
- Bind(&not_ok);
- if (message != nullptr) {
- char chars[1024];
- Vector<char> buffer(chars);
- if (file != nullptr) {
- SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
+ if (FLAG_debug_code) {
+ Label ok(this);
+ Label not_ok(this, Label::kDeferred);
+ if (message != nullptr && FLAG_code_comments) {
+ Comment("[ Assert: %s", message);
} else {
- SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
+ Comment("[ Assert");
+ }
+ Node* condition = condition_body();
+ DCHECK_NOT_NULL(condition);
+ Branch(condition, &ok, &not_ok);
+ Bind(&not_ok);
+ if (message != nullptr) {
+ char chars[1024];
+ Vector<char> buffer(chars);
+ if (file != nullptr) {
+ SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file,
+ line);
+ } else {
+ SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
+ }
+ CallRuntime(
+ Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
+ HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
}
- CallRuntime(
- Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
- HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
- }
- DebugBreak();
- Goto(&ok);
- Bind(&ok);
- Comment("] Assert");
+ DebugBreak();
+ Goto(&ok);
+ Bind(&ok);
+ Comment("] Assert");
+ }
#endif
}
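(The assertion body is now emitted only when FLAG_debug_code is set at stub-generation time, in addition to the existing DEBUG compile-time guard. A minimal standalone sketch of the double gate, in plain C++ rather than CSA, with the flag as a stand-in for V8's --debug-code:

    #include <cstdio>
    #include <cstdlib>

    static bool flag_debug_code = true;  // stand-in for --debug-code

    void AssertCondition(bool condition, const char* message,
                         const char* file, int line) {
      if (!flag_debug_code) return;  // the newly added runtime gate
      if (!condition) {
        std::fprintf(stderr, "CSA_ASSERT failed: %s [%s:%d]\n", message,
                     file, line);
        std::abort();  // stands in for DebugBreak()
      }
    }
)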
@@ -171,6 +174,16 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
}
}
+bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test) {
+ int32_t constant_test;
+ Smi* smi_test;
+ if ((ToInt32Constant(test, constant_test) && constant_test == 0) ||
+ (ToSmiConstant(test, smi_test) && smi_test->value() == 0)) {
+ return true;
+ }
+ return false;
+}
+
Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
Comment("IntPtrRoundUpToPowerOfTwo32");
CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
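(IsIntPtrOrSmiConstantZero, added above, folds the question "is this parameter the constant zero" across both parameter modes; AllocateJSArray later in this diff switches to it so the empty-array fast path also fires when the capacity arrives as a Smi constant. A standalone sketch of the dual-mode test, assuming V8's default 64-bit Smi layout with the payload in the upper 32 bits:

    #include <cstdint>

    bool IsConstantZero(int64_t bits, bool is_tagged_smi) {
      int64_t value = is_tagged_smi ? (bits >> 32)  // SmiUntag on 64-bit
                                    : bits;         // already a raw word
      return value == 0;
    }
)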
@@ -196,11 +209,10 @@ Node* CodeStubAssembler::Float64Round(Node* x) {
Node* one = Float64Constant(1.0);
Node* one_half = Float64Constant(0.5);
- Variable var_x(this, MachineRepresentation::kFloat64);
Label return_x(this);
// Round up {x} towards Infinity.
- var_x.Bind(Float64Ceil(x));
+ Variable var_x(this, MachineRepresentation::kFloat64, Float64Ceil(x));
GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
&return_x);
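(Two mechanical refactorings recur through the rest of this file: Variable now takes its initial binding as a constructor argument, replacing a construct-then-Bind pair, and GotoUnless is renamed GotoIfNot. A standalone mirror of the Variable change, with a stand-in value type:

    struct Variable {
      explicit Variable(double initial) : value_(initial) {}  // init at ctor
      void Bind(double v) { value_ = v; }
      double value() const { return value_; }
     private:
      double value_;
    };
    // Before: Variable var_x(rep); var_x.Bind(initial);
    // After:  Variable var_x(rep, initial);
)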
@@ -221,9 +233,8 @@ Node* CodeStubAssembler::Float64Ceil(Node* x) {
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
- Variable var_x(this, MachineRepresentation::kFloat64);
+ Variable var_x(this, MachineRepresentation::kFloat64, x);
Label return_x(this), return_minus_x(this);
- var_x.Bind(x);
// Check if {x} is greater than zero.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
@@ -237,7 +248,7 @@ Node* CodeStubAssembler::Float64Ceil(Node* x) {
// Round positive {x} towards Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
- GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
+ GotoIfNot(Float64LessThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Add(var_x.value(), one));
Goto(&return_x);
}
@@ -246,12 +257,12 @@ Node* CodeStubAssembler::Float64Ceil(Node* x) {
{
// Just return {x} unless it's in the range ]-2^52,0[
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoUnless(Float64LessThan(x, zero), &return_x);
+ GotoIfNot(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards Infinity and return the result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+ GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_minus_x);
}
@@ -274,9 +285,8 @@ Node* CodeStubAssembler::Float64Floor(Node* x) {
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
- Variable var_x(this, MachineRepresentation::kFloat64);
+ Variable var_x(this, MachineRepresentation::kFloat64, x);
Label return_x(this), return_minus_x(this);
- var_x.Bind(x);
// Check if {x} is greater than zero.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
@@ -290,7 +300,7 @@ Node* CodeStubAssembler::Float64Floor(Node* x) {
// Round positive {x} towards -Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+ GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_x);
}
@@ -299,12 +309,12 @@ Node* CodeStubAssembler::Float64Floor(Node* x) {
{
// Just return {x} unless it's in the range ]-2^52,0[
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoUnless(Float64LessThan(x, zero), &return_x);
+ GotoIfNot(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards -Infinity and return the result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
- GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
+ GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Add(var_x.value(), one));
Goto(&return_minus_x);
}
@@ -358,9 +368,8 @@ Node* CodeStubAssembler::Float64Trunc(Node* x) {
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
- Variable var_x(this, MachineRepresentation::kFloat64);
+ Variable var_x(this, MachineRepresentation::kFloat64, x);
Label return_x(this), return_minus_x(this);
- var_x.Bind(x);
// Check if {x} is greater than 0.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
@@ -377,7 +386,7 @@ Node* CodeStubAssembler::Float64Trunc(Node* x) {
// Round positive {x} towards -Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+ GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
}
Goto(&return_x);
@@ -391,12 +400,12 @@ Node* CodeStubAssembler::Float64Trunc(Node* x) {
} else {
// Just return {x} unless it's in the range ]-2^52,0[.
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoUnless(Float64LessThan(x, zero), &return_x);
+ GotoIfNot(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards -Infinity and return result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+ GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_minus_x);
}
@@ -480,7 +489,7 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
// Check if {a} is kMinInt and {b} is -1 (only relevant if the
// kMinInt is actually representable as a Smi).
Label join(this);
- GotoUnless(Word32Equal(a, Int32Constant(kMinInt)), &join);
+ GotoIfNot(Word32Equal(a, Int32Constant(kMinInt)), &join);
GotoIf(Word32Equal(b, Int32Constant(-1)), &return_minuszero);
Goto(&join);
Bind(&join);
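(The guard above covers the one overflowing case of Smi modulus: in JavaScript the result of % takes the dividend's sign, so kMinInt % -1 must yield -0, which no Smi can represent, hence the dedicated minus-zero exit. A standalone sketch of the predicate:

    #include <climits>
    #include <cstdint>

    bool NeedsMinusZeroResult(int32_t a, int32_t b) {
      // INT_MIN % -1 also overflows in C++, so it must be special-cased
      // before any machine-level division is attempted.
      return a == INT_MIN && b == -1;
    }
)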
@@ -601,82 +610,10 @@ Node* CodeStubAssembler::WordIsWordAligned(Node* word) {
WordAnd(word, IntPtrConstant((1 << kPointerSizeLog2) - 1)));
}
-void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
- Node* rhs, Node* rhs_map,
- Label* if_equal,
- Label* if_notequal) {
- Label if_mapsame(this), if_mapnotsame(this);
- Branch(WordEqual(lhs_map, rhs_map), &if_mapsame, &if_mapnotsame);
-
- Bind(&if_mapsame);
- {
- // Both {lhs} and {rhs} are Simd128Values with the same map, need special
- // handling for Float32x4 because of NaN comparisons.
- Label if_float32x4(this), if_notfloat32x4(this);
- Node* float32x4_map = HeapConstant(factory()->float32x4_map());
- Branch(WordEqual(lhs_map, float32x4_map), &if_float32x4, &if_notfloat32x4);
-
- Bind(&if_float32x4);
- {
- // Both {lhs} and {rhs} are Float32x4, compare the lanes individually
- // using a floating point comparison.
- for (int offset = Float32x4::kValueOffset - kHeapObjectTag;
- offset < Float32x4::kSize - kHeapObjectTag;
- offset += sizeof(float)) {
- // Load the floating point values for {lhs} and {rhs}.
- Node* lhs_value =
- Load(MachineType::Float32(), lhs, IntPtrConstant(offset));
- Node* rhs_value =
- Load(MachineType::Float32(), rhs, IntPtrConstant(offset));
-
- // Perform a floating point comparison.
- Label if_valueequal(this), if_valuenotequal(this);
- Branch(Float32Equal(lhs_value, rhs_value), &if_valueequal,
- &if_valuenotequal);
- Bind(&if_valuenotequal);
- Goto(if_notequal);
- Bind(&if_valueequal);
- }
-
- // All 4 lanes match, {lhs} and {rhs} considered equal.
- Goto(if_equal);
- }
-
- Bind(&if_notfloat32x4);
- {
- // For other Simd128Values we just perform a bitwise comparison.
- for (int offset = Simd128Value::kValueOffset - kHeapObjectTag;
- offset < Simd128Value::kSize - kHeapObjectTag;
- offset += kPointerSize) {
- // Load the word values for {lhs} and {rhs}.
- Node* lhs_value =
- Load(MachineType::Pointer(), lhs, IntPtrConstant(offset));
- Node* rhs_value =
- Load(MachineType::Pointer(), rhs, IntPtrConstant(offset));
-
- // Perform a bitwise word-comparison.
- Label if_valueequal(this), if_valuenotequal(this);
- Branch(WordEqual(lhs_value, rhs_value), &if_valueequal,
- &if_valuenotequal);
- Bind(&if_valuenotequal);
- Goto(if_notequal);
- Bind(&if_valueequal);
- }
-
- // Bitwise comparison succeeded, {lhs} and {rhs} considered equal.
- Goto(if_equal);
- }
- }
-
- Bind(&if_mapnotsame);
- Goto(if_notequal);
-}
-
void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
Node* receiver_map, Label* definitely_no_elements,
Label* possibly_elements) {
- Variable var_map(this, MachineRepresentation::kTagged);
- var_map.Bind(receiver_map);
+ Variable var_map(this, MachineRepresentation::kTagged, receiver_map);
Label loop_body(this, &var_map);
Node* empty_elements = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
Goto(&loop_body);
@@ -734,11 +671,11 @@ void CodeStubAssembler::BranchIfFastJSArray(
Node* elements_kind = LoadMapElementsKind(map);
// Bailout if receiver has slow elements.
- GotoUnless(IsFastElementsKind(elements_kind), if_false);
+ GotoIfNot(IsFastElementsKind(elements_kind), if_false);
// Check prototype chain if receiver does not have packed elements.
if (mode == FastJSArrayAccessMode::INBOUNDS_READ) {
- GotoUnless(IsHoleyFastElementsKind(elements_kind), if_true);
+ GotoIfNot(IsHoleyFastElementsKind(elements_kind), if_true);
}
BranchIfPrototypesHaveNoElements(map, if_true, if_false);
}
@@ -811,10 +748,9 @@ Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
Node* limit_address) {
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
- Variable adjusted_size(this, MachineType::PointerRepresentation());
- adjusted_size.Bind(size_in_bytes);
+ Variable adjusted_size(this, MachineType::PointerRepresentation(),
+ size_in_bytes);
if (flags & kDoubleAlignment) {
- // TODO(epertoso): Simd128 alignment.
Label aligned(this), not_aligned(this), merge(this, &adjusted_size);
Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
&aligned);
@@ -831,8 +767,9 @@ Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
Bind(&merge);
}
- Variable address(this, MachineRepresentation::kTagged);
- address.Bind(AllocateRawUnaligned(adjusted_size.value(), kNone, top, limit));
+ Variable address(
+ this, MachineRepresentation::kTagged,
+ AllocateRawUnaligned(adjusted_size.value(), kNone, top, limit));
Label needs_filler(this), doesnt_need_filler(this),
merge_address(this, &address);
@@ -841,8 +778,6 @@ Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
Bind(&needs_filler);
// Store a filler and increase the address by kPointerSize.
- // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
- // it when Simd128 alignment is supported.
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
LoadRoot(Heap::kOnePointerFillerMapRootIndex));
address.Bind(BitcastWordToTagged(
@@ -906,11 +841,10 @@ Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) {
void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Label* if_false) {
- Label if_valueissmi(this), if_valueisnotsmi(this), if_valueisstring(this),
- if_valueisheapnumber(this), if_valueisother(this);
+ Label if_valueissmi(this), if_valueisnotsmi(this),
+ if_valueisheapnumber(this, Label::kDeferred);
- // Fast check for Boolean {value}s (common case).
- GotoIf(WordEqual(value, BooleanConstant(true)), if_true);
+ // Rule out false {value}.
GotoIf(WordEqual(value, BooleanConstant(false)), if_false);
// Check if {value} is a Smi or a HeapObject.
@@ -924,27 +858,24 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Bind(&if_valueisnotsmi);
{
+ // Check if {value} is the empty string.
+ GotoIf(IsEmptyString(value), if_false);
+
// The {value} is a HeapObject, load its map.
Node* value_map = LoadMap(value);
- // Load the {value}s instance type.
- Node* value_instance_type = LoadMapInstanceType(value_map);
-
- // Dispatch based on the instance type; we distinguish all String instance
- // types, the HeapNumber type and everything else.
- GotoIf(Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
- &if_valueisheapnumber);
- Branch(IsStringInstanceType(value_instance_type), &if_valueisstring,
- &if_valueisother);
+ // Only null, undefined and document.all have the undetectable bit set,
+ // so we can return false immediately when that bit is set.
+ Node* value_map_bitfield = LoadMapBitField(value_map);
+ Node* value_map_undetectable =
+ Word32And(value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
- Bind(&if_valueisstring);
- {
- // Load the string length field of the {value}.
- Node* value_length = LoadObjectField(value, String::kLengthOffset);
+ // Check if the {value} is undetectable.
+ GotoIfNot(Word32Equal(value_map_undetectable, Int32Constant(0)), if_false);
- // Check if the {value} is the empty string.
- BranchIfSmiEqual(value_length, SmiConstant(0), if_false, if_true);
- }
+ // We still need to handle numbers specially, but all other {value}s
+ // that make it here yield true.
+ Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber, if_true);
Bind(&if_valueisheapnumber);
{
@@ -956,22 +887,6 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)),
if_true, if_false);
}
-
- Bind(&if_valueisother);
- {
- // Load the bit field from the {value}s map. The {value} is now either
- // Null or Undefined, which have the undetectable bit set (so we always
- // return false for those), or a Symbol or Simd128Value, whose maps never
- // have the undetectable bit set (so we always return true for those), or
- // a JSReceiver, which may or may not have the undetectable bit set.
- Node* value_map_bitfield = LoadMapBitField(value_map);
- Node* value_map_undetectable = Word32And(
- value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
-
- // Check if the {value} is undetectable.
- Branch(Word32Equal(value_map_undetectable, Int32Constant(0)), if_true,
- if_false);
- }
}
}
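(BranchIfToBooleanIsTrue is restructured here: instead of dispatching on the full instance type, it rules out false, zero Smis, and the empty string, then uses the map's undetectable bit, set only for null, undefined, and document.all, to return false, treats HeapNumbers specially, and calls everything else truthy. A simplified standalone mirror of that decision order, with stand-in fields:

    #include <cmath>
    #include <string>

    struct Value {
      bool is_false, is_smi, is_string, is_undetectable, is_number;
      long smi; std::string str; double num;
    };

    bool ToBooleanIsTrue(const Value& v) {
      if (v.is_false) return false;
      if (v.is_smi) return v.smi != 0;
      if (v.is_string) return !v.str.empty();  // empty string is falsy
      if (v.is_undetectable) return false;     // null/undefined/document.all
      if (v.is_number)
        return std::fabs(v.num) > 0.0;         // rejects 0, -0 and NaN
      return true;                             // all other objects are truthy
    }
)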
@@ -1054,6 +969,24 @@ Node* CodeStubAssembler::LoadAndUntagToWord32Root(
}
}
+Node* CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
+ if (Is64()) {
+ int zero_offset = offset + kPointerSize / 2;
+ int payload_offset = offset;
+#if V8_TARGET_LITTLE_ENDIAN
+ std::swap(zero_offset, payload_offset);
+#endif
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
+ IntPtrConstant(zero_offset), Int32Constant(0));
+ return StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
+ IntPtrConstant(payload_offset),
+ TruncateInt64ToInt32(value));
+ } else {
+ return StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
+ IntPtrConstant(offset), SmiTag(value));
+ }
+}
+
Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
return LoadObjectField(object, HeapNumber::kValueOffset,
MachineType::Float64());
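(StoreAndTagSmi, added above, writes a Smi with two 32-bit stores on 64-bit targets: the payload lives in the upper half of the word and the lower half is zero, with the two offsets swapped for endianness. A standalone sketch of that layout, little-endian shown, the hunk swaps the offsets for big-endian:

    #include <cstdint>
    #include <cstring>

    // 64-bit Smi: 32-bit payload in the upper word, zeros below.
    int64_t TagSmi64(int32_t value) {
      return static_cast<int64_t>(value) << 32;
    }

    void StoreSmiAsHalves(void* base, int32_t value) {
      uint32_t halves[2] = {0u, static_cast<uint32_t>(value)};  // low word 0
      std::memcpy(base, halves, sizeof(halves));
    }
)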
@@ -1139,9 +1072,9 @@ Node* CodeStubAssembler::LoadMapPrototypeInfo(Node* map,
Node* prototype_info =
LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
GotoIf(TaggedIsSmi(prototype_info), if_no_proto_info);
- GotoUnless(WordEqual(LoadMap(prototype_info),
- LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
- if_no_proto_info);
+ GotoIfNot(WordEqual(LoadMap(prototype_info),
+ LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
+ if_no_proto_info);
return prototype_info;
}
@@ -1176,8 +1109,8 @@ Node* CodeStubAssembler::LoadMapConstructorFunctionIndex(Node* map) {
Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- Variable result(this, MachineRepresentation::kTagged);
- result.Bind(LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
+ Variable result(this, MachineRepresentation::kTagged,
+ LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
Label done(this), loop(this, &result);
Goto(&loop);
@@ -1186,7 +1119,7 @@ Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
GotoIf(TaggedIsSmi(result.value()), &done);
Node* is_map_type =
Word32Equal(LoadInstanceType(result.value()), Int32Constant(MAP_TYPE));
- GotoUnless(is_map_type, &done);
+ GotoIfNot(is_map_type, &done);
result.Bind(
LoadObjectField(result.value(), Map::kConstructorOrBackPointerOffset));
Goto(&loop);
@@ -1195,6 +1128,25 @@ Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
return result.value();
}
+Node* CodeStubAssembler::LoadSharedFunctionInfoSpecialField(
+ Node* shared, int offset, ParameterMode mode) {
+ if (Is64()) {
+ Node* result = LoadObjectField(shared, offset, MachineType::Int32());
+ if (mode == SMI_PARAMETERS) {
+ result = SmiTag(result);
+ } else {
+ result = ChangeUint32ToWord(result);
+ }
+ return result;
+ } else {
+ Node* result = LoadObjectField(shared, offset);
+ if (mode != SMI_PARAMETERS) {
+ result = SmiUntag(result);
+ }
+ return result;
+ }
+}
+
Node* CodeStubAssembler::LoadNameHashField(Node* name) {
CSA_ASSERT(this, IsName(name));
return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
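(LoadSharedFunctionInfoSpecialField, added above, reconciles two storage schemes that the hunk implies rather than states: on 64-bit targets these SharedFunctionInfo fields are raw int32s that must be tagged or zero-extended on load, while on 32-bit targets they are Smis that must be untagged when a word is wanted. A standalone sketch of the two directions, assuming the usual Smi encodings:

    #include <cstdint>

    int64_t LoadField64(int32_t raw, bool want_smi) {
      return want_smi ? static_cast<int64_t>(raw) << 32   // SmiTag, 64-bit
                      : static_cast<int64_t>(static_cast<uint32_t>(raw));
    }

    int32_t LoadField32(int32_t smi, bool want_smi) {
      return want_smi ? smi : (smi >> 1);  // SmiUntag, 32-bit: shift by one
    }
)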
@@ -1483,12 +1435,12 @@ Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* context,
Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
Label pre_bailout(this);
Label success(this);
- Variable var_elements(this, MachineRepresentation::kTagged);
Variable var_tagged_length(this, MachineRepresentation::kTagged);
ParameterMode mode = OptimalParameterMode();
- Variable var_length(this, OptimalParameterRepresentation());
- var_length.Bind(TaggedToParameter(LoadJSArrayLength(array), mode));
- var_elements.Bind(LoadElements(array));
+ Variable var_length(this, OptimalParameterRepresentation(),
+ TaggedToParameter(LoadJSArrayLength(array), mode));
+ Variable var_elements(this, MachineRepresentation::kTagged,
+ LoadElements(array));
Node* capacity =
TaggedToParameter(LoadFixedArrayBaseLength(var_elements.value()), mode);
@@ -1498,7 +1450,7 @@ Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* context,
Node* growth = IntPtrSub(args.GetLength(), first);
Node* new_length =
IntPtrOrSmiAdd(WordToParameter(growth, mode), var_length.value(), mode);
- GotoUnless(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
+ GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
Node* new_capacity = CalculateNewElementsCapacity(new_length, mode);
var_elements.Bind(GrowElementsCapacity(array, var_elements.value(), kind,
kind, capacity, new_capacity, mode,
@@ -1572,6 +1524,9 @@ Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value,
Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
+ if (length == 0) {
+ return LoadRoot(Heap::kempty_stringRootIndex);
+ }
Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
@@ -1591,8 +1546,10 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
Variable var_result(this, MachineRepresentation::kTagged);
// Compute the SeqOneByteString size and check if it fits into new space.
- Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
- if_join(this);
+ Label if_lengthiszero(this), if_sizeissmall(this),
+ if_notsizeissmall(this, Label::kDeferred), if_join(this);
+ GotoIf(WordEqual(length, IntPtrOrSmiConstant(0, mode)), &if_lengthiszero);
+
Node* raw_size = GetArrayAllocationSize(
length, UINT8_ELEMENTS, mode,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
@@ -1625,6 +1582,12 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
Goto(&if_join);
}
+ Bind(&if_lengthiszero);
+ {
+ var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex));
+ Goto(&if_join);
+ }
+
Bind(&if_join);
return var_result.value();
}
@@ -1632,6 +1595,9 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
AllocationFlags flags) {
Comment("AllocateSeqTwoByteString");
+ if (length == 0) {
+ return LoadRoot(Heap::kempty_stringRootIndex);
+ }
Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
@@ -1651,8 +1617,10 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
Variable var_result(this, MachineRepresentation::kTagged);
// Compute the SeqTwoByteString size and check if it fits into new space.
- Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
- if_join(this);
+ Label if_lengthiszero(this), if_sizeissmall(this),
+ if_notsizeissmall(this, Label::kDeferred), if_join(this);
+ GotoIf(WordEqual(length, IntPtrOrSmiConstant(0, mode)), &if_lengthiszero);
+
Node* raw_size = GetArrayAllocationSize(
length, UINT16_ELEMENTS, mode,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
@@ -1687,6 +1655,12 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
Goto(&if_join);
}
+ Bind(&if_lengthiszero);
+ {
+ var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex));
+ Goto(&if_join);
+ }
+
Bind(&if_join);
return var_result.value();
}
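(All four sequential-string allocators, one- and two-byte with constant and runtime lengths, now short-circuit a zero length to the canonical empty-string root, skipping the allocation entirely. A standalone sketch of the fast path, with std::string standing in for the heap root:

    #include <string>

    const std::string* AllocateStringOfLength(size_t length) {
      static const std::string kEmpty;   // stands in for the empty-string root
      if (length == 0) return &kEmpty;   // the newly added early return
      return new std::string(length, '\0');  // the normal allocation path
    }
)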
@@ -1969,12 +1943,12 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
Comment("StoreFieldsNoWriteBarrier");
CSA_ASSERT(this, WordIsWordAligned(start_address));
CSA_ASSERT(this, WordIsWordAligned(end_address));
- BuildFastLoop(
- MachineType::PointerRepresentation(), start_address, end_address,
- [this, value](Node* current) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
- },
- kPointerSize, IndexAdvanceMode::kPost);
+ BuildFastLoop(start_address, end_address,
+ [this, value](Node* current) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, current,
+ value);
+ },
+ kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
}
Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
@@ -2044,8 +2018,7 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
Node* allocation_site,
ParameterMode capacity_mode) {
Node *array = nullptr, *elements = nullptr;
- int32_t constant_capacity;
- if (ToInt32Constant(capacity, constant_capacity) && constant_capacity == 0) {
+ if (IsIntPtrOrSmiConstantZero(capacity)) {
// Array is empty. Use the shared empty fixed array instead of allocating a
// new one.
array = AllocateUninitializedJSArrayWithoutElements(kind, array_map, length,
@@ -2174,9 +2147,9 @@ void CodeStubAssembler::CopyFixedArrayElements(
Node* limit_offset = ElementOffsetFromIndex(
IntPtrOrSmiConstant(0, mode), from_kind, mode, first_element_offset);
- Variable var_from_offset(this, MachineType::PointerRepresentation());
- var_from_offset.Bind(ElementOffsetFromIndex(element_count, from_kind, mode,
- first_element_offset));
+ Variable var_from_offset(this, MachineType::PointerRepresentation(),
+ ElementOffsetFromIndex(element_count, from_kind,
+ mode, first_element_offset));
// This second variable is used only when the element sizes of source and
// destination arrays do not match.
Variable var_to_offset(this, MachineType::PointerRepresentation());
@@ -2301,9 +2274,9 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
int from_increment = 1 << ElementsKindToShiftSize(from_kind);
int to_increment = 1 << ElementsKindToShiftSize(to_kind);
- Variable current_to_offset(this, MachineType::PointerRepresentation());
+ Variable current_to_offset(this, MachineType::PointerRepresentation(),
+ to_offset);
VariableList vars({&current_to_offset}, zone());
- current_to_offset.Bind(to_offset);
int to_index_constant = 0, from_index_constant = 0;
Smi* to_index_smi = nullptr;
Smi* from_index_smi = nullptr;
@@ -2315,8 +2288,7 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
(ToSmiConstant(from_index, from_index_smi) &&
ToSmiConstant(to_index, to_index_smi) &&
to_index_smi == from_index_smi));
- BuildFastLoop(vars, MachineType::PointerRepresentation(), from_offset,
- limit_offset,
+ BuildFastLoop(vars, from_offset, limit_offset,
[this, from_string, to_string, &current_to_offset, to_increment,
type, rep, index_same](Node* offset) {
Node* value = Load(type, from_string, offset);
@@ -2327,7 +2299,7 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
Increment(current_to_offset, to_increment);
}
},
- from_increment, IndexAdvanceMode::kPost);
+ from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
}
Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
@@ -2512,10 +2484,9 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
// We might need to loop once due to ToNumber conversion.
- Variable var_value(this, MachineRepresentation::kTagged),
+ Variable var_value(this, MachineRepresentation::kTagged, value),
var_result(this, MachineRepresentation::kWord32);
Label loop(this, &var_value), done_loop(this, &var_result);
- var_value.Bind(value);
Goto(&loop);
Bind(&loop);
{
@@ -2576,7 +2547,7 @@ Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
Bind(&if_valueisequal);
{
- GotoUnless(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
+ GotoIfNot(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
Branch(Int32LessThan(Float64ExtractHighWord32(value), Int32Constant(0)),
&if_valueisheapnumber, &if_valueisint32);
}
@@ -2680,8 +2651,7 @@ Node* CodeStubAssembler::ChangeUint32ToTagged(Node* value) {
Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
char const* method_name) {
- Variable var_value(this, MachineRepresentation::kTagged);
- var_value.Bind(value);
+ Variable var_value(this, MachineRepresentation::kTagged, value);
// Check if the {value} is a Smi or a HeapObject.
Label if_valueissmi(this, Label::kDeferred), if_valueisnotsmi(this),
@@ -2724,7 +2694,7 @@ Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
CallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
HeapConstant(factory()->NewStringFromAsciiChecked(
method_name, TENURED)));
- Goto(&if_valueisstring); // Never reached.
+ Unreachable();
}
}
}
@@ -2762,10 +2732,9 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
PrimitiveType primitive_type,
char const* method_name) {
// We might need to loop once due to JSValue unboxing.
- Variable var_value(this, MachineRepresentation::kTagged);
+ Variable var_value(this, MachineRepresentation::kTagged, value);
Label loop(this, &var_value), done_loop(this),
done_throw(this, Label::kDeferred);
- var_value.Bind(value);
Goto(&loop);
Bind(&loop);
{
@@ -2824,7 +2793,7 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
CallRuntime(Runtime::kThrowNotGeneric, context,
HeapConstant(factory()->NewStringFromAsciiChecked(method_name,
TENURED)));
- Goto(&done_loop); // Never reached.
+ Unreachable();
}
Bind(&done_loop);
@@ -2852,8 +2821,7 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
Runtime::kThrowIncompatibleMethodReceiver, context,
HeapConstant(factory()->NewStringFromAsciiChecked(method_name, TENURED)),
value);
- var_value_map.Bind(UndefinedConstant());
- Goto(&out); // Never reached.
+ Unreachable();
Bind(&out);
return var_value_map.value();
@@ -2889,6 +2857,10 @@ Node* CodeStubAssembler::IsCallableMap(Node* map) {
Int32Constant(0));
}
+Node* CodeStubAssembler::IsCallable(Node* object) {
+ return IsCallableMap(LoadMap(object));
+}
+
Node* CodeStubAssembler::IsConstructorMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
return Word32NotEqual(
@@ -2918,6 +2890,11 @@ Node* CodeStubAssembler::IsJSReceiver(Node* object) {
return IsJSReceiverInstanceType(LoadInstanceType(object));
}
+Node* CodeStubAssembler::IsJSReceiverMap(Node* map) {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ return IsJSReceiverInstanceType(LoadMapInstanceType(map));
+}
+
Node* CodeStubAssembler::IsJSObject(Node* object) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
return Int32GreaterThanOrEqual(LoadInstanceType(object),
@@ -2945,6 +2922,14 @@ Node* CodeStubAssembler::IsWeakCell(Node* object) {
return HasInstanceType(object, WEAK_CELL_TYPE);
}
+Node* CodeStubAssembler::IsBoolean(Node* object) {
+ return IsBooleanMap(LoadMap(object));
+}
+
+Node* CodeStubAssembler::IsHeapNumber(Node* object) {
+ return IsHeapNumberMap(LoadMap(object));
+}
+
Node* CodeStubAssembler::IsName(Node* object) {
return Int32LessThanOrEqual(LoadInstanceType(object),
Int32Constant(LAST_NAME_TYPE));
@@ -3002,14 +2987,12 @@ Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index,
// Translate the {index} into a Word.
index = ParameterToWord(index, parameter_mode);
- // We may need to loop in case of cons or sliced strings.
- Variable var_index(this, MachineType::PointerRepresentation());
+ // We may need to loop in case of cons, thin, or sliced strings.
+ Variable var_index(this, MachineType::PointerRepresentation(), index);
+ Variable var_string(this, MachineRepresentation::kTagged, string);
Variable var_result(this, MachineRepresentation::kWord32);
- Variable var_string(this, MachineRepresentation::kTagged);
Variable* loop_vars[] = {&var_index, &var_string};
Label done_loop(this, &var_result), loop(this, 2, loop_vars);
- var_string.Bind(string);
- var_index.Bind(index);
Goto(&loop);
Bind(&loop);
{
@@ -3154,14 +3137,29 @@ Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index,
Bind(&if_stringisnotexternal);
{
- // The {string} is a SlicedString, continue with its parent.
- Node* string_offset =
- LoadAndUntagObjectField(string, SlicedString::kOffsetOffset);
- Node* string_parent =
- LoadObjectField(string, SlicedString::kParentOffset);
- var_index.Bind(IntPtrAdd(index, string_offset));
- var_string.Bind(string_parent);
- Goto(&loop);
+ Label if_stringissliced(this), if_stringisthin(this);
+ Branch(
+ Word32Equal(Word32And(string_instance_type,
+ Int32Constant(kStringRepresentationMask)),
+ Int32Constant(kSlicedStringTag)),
+ &if_stringissliced, &if_stringisthin);
+ Bind(&if_stringissliced);
+ {
+ // The {string} is a SlicedString, continue with its parent.
+ Node* string_offset =
+ LoadAndUntagObjectField(string, SlicedString::kOffsetOffset);
+ Node* string_parent =
+ LoadObjectField(string, SlicedString::kParentOffset);
+ var_index.Bind(IntPtrAdd(index, string_offset));
+ var_string.Bind(string_parent);
+ Goto(&loop);
+ }
+ Bind(&if_stringisthin);
+ {
+ // The {string} is a ThinString, continue with its actual value.
+ var_string.Bind(LoadObjectField(string, ThinString::kActualOffset));
+ Goto(&loop);
+ }
}
}
}
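(StringCharCodeAt previously assumed any non-cons indirect string was sliced; with ThinString added as a third indirect kind, the loop above now distinguishes the two and simply follows a thin string to its actual value without adjusting the index. A standalone sketch of the extended unwrapping loop, with illustrative types rather than V8's object layout:

    struct Str {
      enum Kind { kDirect, kSliced, kThin } kind;
      Str* target;  // parent (sliced) or actual string (thin)
      int offset;   // start offset within the parent (sliced only)
    };

    Str* UnwrapForCharAt(Str* s, int* index) {
      for (;;) {
        if (s->kind == Str::kSliced) {
          *index += s->offset;  // correct the index by the slice offset
          s = s->target;
        } else if (s->kind == Str::kThin) {
          s = s->target;        // the newly handled case: no index change
        } else {
          return s;             // ready for the real character load
        }
      }
    }
)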
@@ -3291,31 +3289,28 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
Label end(this);
Label runtime(this);
- Variable var_instance_type(this, MachineRepresentation::kWord32); // Int32.
- Variable var_result(this, MachineRepresentation::kTagged); // String.
- Variable var_from(this, MachineRepresentation::kTagged); // Smi.
- Variable var_string(this, MachineRepresentation::kTagged); // String.
+ Node* const int_zero = Int32Constant(0);
- var_instance_type.Bind(Int32Constant(0));
- var_string.Bind(string);
- var_from.Bind(from);
+ // Int32 variables.
+ Variable var_instance_type(this, MachineRepresentation::kWord32, int_zero);
+ Variable var_representation(this, MachineRepresentation::kWord32, int_zero);
- // Make sure first argument is a string.
+ Variable var_from(this, MachineRepresentation::kTagged, from); // Smi.
+ Variable var_string(this, MachineRepresentation::kTagged, string); // String.
+ Variable var_result(this, MachineRepresentation::kTagged); // String.
- // Bailout if receiver is a Smi.
- GotoIf(TaggedIsSmi(string), &runtime);
+ // Make sure first argument is a string.
+ CSA_ASSERT(this, TaggedIsNotSmi(string));
+ CSA_ASSERT(this, IsString(string));
// Load the instance type of the {string}.
Node* const instance_type = LoadInstanceType(string);
var_instance_type.Bind(instance_type);
- // Check if {string} is a String.
- GotoUnless(IsStringInstanceType(instance_type), &runtime);
-
// Make sure that both from and to are non-negative smis.
- GotoUnless(TaggedIsPositiveSmi(from), &runtime);
- GotoUnless(TaggedIsPositiveSmi(to), &runtime);
+ GotoIfNot(TaggedIsPositiveSmi(from), &runtime);
+ GotoIfNot(TaggedIsPositiveSmi(to), &runtime);
Node* const substr_length = SmiSub(to, from);
Node* const string_length = LoadStringLength(string);
@@ -3337,7 +3332,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// and put the underlying string into var_string.
// If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask ==
+ (kSlicedStringTag & kConsStringTag & kThinStringTag));
STATIC_ASSERT(kIsIndirectStringMask != 0);
Label underlying_unpacked(this);
GotoIf(Word32Equal(
@@ -3345,13 +3341,14 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
Int32Constant(0)),
&underlying_unpacked);
- // The subject string is either a sliced or cons string.
+ // The subject string is a sliced, cons, or thin string.
- Label sliced_string(this);
- GotoIf(Word32NotEqual(
- Word32And(instance_type, Int32Constant(kSlicedNotConsMask)),
- Int32Constant(0)),
- &sliced_string);
+ Label thin_string(this), thin_or_sliced(this);
+ var_representation.Bind(
+ Word32And(instance_type, Int32Constant(kStringRepresentationMask)));
+ GotoIf(
+ Word32NotEqual(var_representation.value(), Int32Constant(kConsStringTag)),
+ &thin_or_sliced);
// Cons string. Check whether it is flat, then fetch first part.
// Flat cons strings have an empty second part.
@@ -3363,14 +3360,25 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
Node* first_string_part = LoadObjectField(string, ConsString::kFirstOffset);
var_string.Bind(first_string_part);
var_instance_type.Bind(LoadInstanceType(first_string_part));
+ var_representation.Bind(Word32And(
+ var_instance_type.value(), Int32Constant(kStringRepresentationMask)));
- Goto(&underlying_unpacked);
+ // The loaded first part might be a thin string.
+ Branch(Word32Equal(Word32And(var_instance_type.value(),
+ Int32Constant(kIsIndirectStringMask)),
+ Int32Constant(0)),
+ &underlying_unpacked, &thin_string);
}
- Bind(&sliced_string);
+ Bind(&thin_or_sliced);
{
+ GotoIf(
+ Word32Equal(var_representation.value(), Int32Constant(kThinStringTag)),
+ &thin_string);
+ // Otherwise it's a sliced string.
// Fetch parent and correct start index by offset.
- Node* sliced_offset = LoadObjectField(string, SlicedString::kOffsetOffset);
+ Node* sliced_offset =
+ LoadObjectField(var_string.value(), SlicedString::kOffsetOffset);
var_from.Bind(SmiAdd(from, sliced_offset));
Node* slice_parent = LoadObjectField(string, SlicedString::kParentOffset);
@@ -3379,6 +3387,19 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
Node* slice_parent_instance_type = LoadInstanceType(slice_parent);
var_instance_type.Bind(slice_parent_instance_type);
+ // The loaded parent might be a thin string.
+ Branch(Word32Equal(Word32And(var_instance_type.value(),
+ Int32Constant(kIsIndirectStringMask)),
+ Int32Constant(0)),
+ &underlying_unpacked, &thin_string);
+ }
+
+ Bind(&thin_string);
+ {
+ Node* actual_string =
+ LoadObjectField(var_string.value(), ThinString::kActualOffset);
+ var_string.Bind(actual_string);
+ var_instance_type.Bind(LoadInstanceType(actual_string));
Goto(&underlying_unpacked);
}
@@ -3426,10 +3447,10 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// encoding at this point.
STATIC_ASSERT(kExternalStringTag != 0);
STATIC_ASSERT(kSeqStringTag == 0);
- GotoUnless(Word32Equal(Word32And(var_instance_type.value(),
- Int32Constant(kExternalStringTag)),
- Int32Constant(0)),
- &external_string);
+ GotoIfNot(Word32Equal(Word32And(var_instance_type.value(),
+ Int32Constant(kExternalStringTag)),
+ Int32Constant(0)),
+ &external_string);
var_result.Bind(AllocAndCopyStringCharacters(
this, context, var_string.value(), var_instance_type.value(),
@@ -3444,23 +3465,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// Handle external string.
Bind(&external_string);
{
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- GotoIf(Word32NotEqual(Word32And(var_instance_type.value(),
- Int32Constant(kShortExternalStringMask)),
- Int32Constant(0)),
- &runtime);
-
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize ==
- SeqOneByteString::kHeaderSize);
-
- Node* resource_data =
- LoadObjectField(var_string.value(), ExternalString::kResourceDataOffset,
- MachineType::Pointer());
- Node* const fake_sequential_string = IntPtrSub(
- resource_data,
- IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ Node* const fake_sequential_string = TryDerefExternalString(
+ var_string.value(), var_instance_type.value(), &runtime);
var_result.Bind(AllocAndCopyStringCharacters(
this, context, fake_sequential_string, var_instance_type.value(),
@@ -3509,12 +3515,91 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
return var_result.value();
}
+namespace {
+
+Node* IsExternalStringInstanceType(CodeStubAssembler* a,
+ Node* const instance_type) {
+ CSA_ASSERT(a, a->IsStringInstanceType(instance_type));
+ return a->Word32Equal(
+ a->Word32And(instance_type, a->Int32Constant(kStringRepresentationMask)),
+ a->Int32Constant(kExternalStringTag));
+}
+
+Node* IsShortExternalStringInstanceType(CodeStubAssembler* a,
+ Node* const instance_type) {
+ CSA_ASSERT(a, a->IsStringInstanceType(instance_type));
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ return a->Word32NotEqual(
+ a->Word32And(instance_type, a->Int32Constant(kShortExternalStringMask)),
+ a->Int32Constant(0));
+}
+
+} // namespace
+
+Node* CodeStubAssembler::TryDerefExternalString(Node* const string,
+ Node* const instance_type,
+ Label* if_bailout) {
+ Label out(this);
+
+ USE(IsExternalStringInstanceType);
+ CSA_ASSERT(this, IsExternalStringInstanceType(this, instance_type));
+ GotoIf(IsShortExternalStringInstanceType(this, instance_type), if_bailout);
+
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+
+ Node* resource_data = LoadObjectField(
+ string, ExternalString::kResourceDataOffset, MachineType::Pointer());
+ Node* const fake_sequential_string =
+ IntPtrSub(resource_data,
+ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ return fake_sequential_string;
+}
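+ (TryDerefExternalString factors out the trick previously inlined in SubString: after ruling out short external strings, whose data pointer is not cached, it rewinds the resource-data pointer by the sequential-string header size so that all later header-relative character loads land on the external buffer. A standalone sketch with stand-in constants, not V8's real values:

    #include <cstddef>

    constexpr std::ptrdiff_t kSeqStringHeaderSize = 16;  // stand-in
    constexpr std::ptrdiff_t kHeapObjectTag = 1;         // stand-in

    const char* FakeSequential(const char* resource_data) {
      return resource_data - (kSeqStringHeaderSize - kHeapObjectTag);
    }
    // A later load at string + kSeqStringHeaderSize - kHeapObjectTag + index
    // then reads exactly resource_data[index].
)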
+
+void CodeStubAssembler::MaybeDerefIndirectString(Variable* var_string,
+ Node* instance_type,
+ Variable* var_did_something) {
+ Label deref(this), done(this, var_did_something);
+ Node* representation =
+ Word32And(instance_type, Int32Constant(kStringRepresentationMask));
+ GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), &deref);
+ GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)), &done);
+ // Cons string.
+ Node* rhs = LoadObjectField(var_string->value(), ConsString::kSecondOffset);
+ GotoIf(WordEqual(rhs, EmptyStringConstant()), &deref);
+ Goto(&done);
+
+ Bind(&deref);
+ STATIC_ASSERT(ThinString::kActualOffset == ConsString::kFirstOffset);
+ var_string->Bind(
+ LoadObjectField(var_string->value(), ThinString::kActualOffset));
+ var_did_something->Bind(IntPtrConstant(1));
+ Goto(&done);
+
+ Bind(&done);
+}
+
+void CodeStubAssembler::MaybeDerefIndirectStrings(Variable* var_left,
+ Node* left_instance_type,
+ Variable* var_right,
+ Node* right_instance_type,
+ Label* did_something) {
+ Variable var_did_something(this, MachineType::PointerRepresentation(),
+ IntPtrConstant(0));
+ MaybeDerefIndirectString(var_left, left_instance_type, &var_did_something);
+ MaybeDerefIndirectString(var_right, right_instance_type, &var_did_something);
+
+ GotoIf(WordNotEqual(var_did_something.value(), IntPtrConstant(0)),
+ did_something);
+ // Fall through if neither string was an indirect string.
+}
+
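+ (The two helpers above implement the indirect-string deref protocol used by the string builtins: a thin string always dereferences to its actual value, and a cons string dereferences only when flat, that is, when its second part is the empty string; the STATIC_ASSERT lets a single field load serve both cases. A standalone sketch of the protocol, with illustrative types:

    struct S {
      enum Kind { kOther, kCons, kThin } kind;
      S* first;   // ConsString first part / ThinString actual (same offset)
      S* second;  // cons strings only
    };

    // Returns true when *s was replaced by its direct payload.
    bool MaybeDeref(S** s, S* empty_string) {
      if ((*s)->kind == S::kThin) { *s = (*s)->first; return true; }
      if ((*s)->kind == S::kCons && (*s)->second == empty_string) {
        *s = (*s)->first;  // flat cons: payload is entirely in the first part
        return true;
      }
      return false;
    }
)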
Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
AllocationFlags flags) {
Label check_right(this);
Label runtime(this, Label::kDeferred);
Label cons(this);
- Label non_cons(this);
Variable result(this, MachineRepresentation::kTagged);
Label done(this, &result);
Label done_native(this, &result);
@@ -3532,72 +3617,90 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
Goto(&done_native);
Bind(&cons);
- CSA_ASSERT(this, TaggedIsSmi(left_length));
- CSA_ASSERT(this, TaggedIsSmi(right_length));
- Node* new_length = SmiAdd(left_length, right_length);
- GotoIf(SmiAboveOrEqual(new_length, SmiConstant(String::kMaxLength)),
- &runtime);
-
- GotoIf(SmiLessThan(new_length, SmiConstant(ConsString::kMinLength)),
- &non_cons);
-
- result.Bind(NewConsString(context, new_length, left, right, flags));
- Goto(&done_native);
+ {
+ CSA_ASSERT(this, TaggedIsSmi(left_length));
+ CSA_ASSERT(this, TaggedIsSmi(right_length));
+ Node* new_length = SmiAdd(left_length, right_length);
+ GotoIf(SmiAboveOrEqual(new_length, SmiConstant(String::kMaxLength)),
+ &runtime);
- Bind(&non_cons);
+ Variable var_left(this, MachineRepresentation::kTagged, left);
+ Variable var_right(this, MachineRepresentation::kTagged, right);
+ Variable* input_vars[2] = {&var_left, &var_right};
+ Label non_cons(this, 2, input_vars);
+ Label slow(this, Label::kDeferred);
+ GotoIf(SmiLessThan(new_length, SmiConstant(ConsString::kMinLength)),
+ &non_cons);
- Comment("Full string concatenate");
- Node* left_instance_type = LoadInstanceType(left);
- Node* right_instance_type = LoadInstanceType(right);
- // Compute intersection and difference of instance types.
+ result.Bind(NewConsString(context, new_length, var_left.value(),
+ var_right.value(), flags));
+ Goto(&done_native);
- Node* ored_instance_types = Word32Or(left_instance_type, right_instance_type);
- Node* xored_instance_types =
- Word32Xor(left_instance_type, right_instance_type);
+ Bind(&non_cons);
- // Check if both strings have the same encoding and both are sequential.
- GotoIf(Word32NotEqual(Word32And(xored_instance_types,
- Int32Constant(kStringEncodingMask)),
- Int32Constant(0)),
- &runtime);
- GotoIf(Word32NotEqual(Word32And(ored_instance_types,
- Int32Constant(kStringRepresentationMask)),
- Int32Constant(0)),
- &runtime);
+ Comment("Full string concatenate");
+ Node* left_instance_type = LoadInstanceType(var_left.value());
+ Node* right_instance_type = LoadInstanceType(var_right.value());
+ // Compute intersection and difference of instance types.
- Label two_byte(this);
- GotoIf(Word32Equal(
- Word32And(ored_instance_types, Int32Constant(kStringEncodingMask)),
- Int32Constant(kTwoByteStringTag)),
- &two_byte);
- // One-byte sequential string case
- Node* new_string =
- AllocateSeqOneByteString(context, new_length, SMI_PARAMETERS);
- CopyStringCharacters(left, new_string, SmiConstant(Smi::kZero),
- SmiConstant(Smi::kZero), left_length,
- String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
- SMI_PARAMETERS);
- CopyStringCharacters(right, new_string, SmiConstant(Smi::kZero), left_length,
- right_length, String::ONE_BYTE_ENCODING,
- String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
- result.Bind(new_string);
- Goto(&done_native);
+ Node* ored_instance_types =
+ Word32Or(left_instance_type, right_instance_type);
+ Node* xored_instance_types =
+ Word32Xor(left_instance_type, right_instance_type);
- Bind(&two_byte);
- {
- // Two-byte sequential string case
- new_string = AllocateSeqTwoByteString(context, new_length, SMI_PARAMETERS);
- CopyStringCharacters(left, new_string, SmiConstant(Smi::kZero),
+ // Check if both strings have the same encoding and both are sequential.
+ GotoIf(Word32NotEqual(Word32And(xored_instance_types,
+ Int32Constant(kStringEncodingMask)),
+ Int32Constant(0)),
+ &runtime);
+ GotoIf(Word32NotEqual(Word32And(ored_instance_types,
+ Int32Constant(kStringRepresentationMask)),
+ Int32Constant(0)),
+ &slow);
+
+ Label two_byte(this);
+ GotoIf(Word32Equal(Word32And(ored_instance_types,
+ Int32Constant(kStringEncodingMask)),
+ Int32Constant(kTwoByteStringTag)),
+ &two_byte);
+ // One-byte sequential string case
+ Node* new_string =
+ AllocateSeqOneByteString(context, new_length, SMI_PARAMETERS);
+ CopyStringCharacters(var_left.value(), new_string, SmiConstant(Smi::kZero),
SmiConstant(Smi::kZero), left_length,
- String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
SMI_PARAMETERS);
- CopyStringCharacters(right, new_string, SmiConstant(Smi::kZero),
- left_length, right_length, String::TWO_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING, SMI_PARAMETERS);
+ CopyStringCharacters(var_right.value(), new_string, SmiConstant(Smi::kZero),
+ left_length, right_length, String::ONE_BYTE_ENCODING,
+ String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
result.Bind(new_string);
Goto(&done_native);
- }
+ Bind(&two_byte);
+ {
+ // Two-byte sequential string case
+ new_string =
+ AllocateSeqTwoByteString(context, new_length, SMI_PARAMETERS);
+ CopyStringCharacters(var_left.value(), new_string,
+ SmiConstant(Smi::kZero), SmiConstant(Smi::kZero),
+ left_length, String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING, SMI_PARAMETERS);
+ CopyStringCharacters(var_right.value(), new_string,
+ SmiConstant(Smi::kZero), left_length, right_length,
+ String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+ SMI_PARAMETERS);
+ result.Bind(new_string);
+ Goto(&done_native);
+ }
+
+ Bind(&slow);
+ {
+      // Try to unwrap indirect strings; on success, restart the attempt above.
+ MaybeDerefIndirectStrings(&var_left, left_instance_type, &var_right,
+ right_instance_type, &non_cons);
+ Goto(&runtime);
+ }
+ }
Bind(&runtime);
{
result.Bind(CallRuntime(Runtime::kStringAdd, context, left, right));
@@ -3614,77 +3717,10 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
return result.value();
}
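// Summarizing the tiers above: a result at or beyond String::kMaxLength goes
// straight to the runtime; below ConsString::kMinLength the characters are
// copied into a fresh sequential string; otherwise a ConsString is
// allocated. A sequential copy that finds a non-sequential operand first
// tries to unwrap thin/flat-cons strings via MaybeDerefIndirectStrings and
// loops back to {non_cons}; only when nothing could be unwrapped does it
// fall back to Runtime::kStringAdd.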
-Node* CodeStubAssembler::StringIndexOfChar(Node* context, Node* string,
- Node* needle_char, Node* from) {
- CSA_ASSERT(this, IsString(string));
- Variable var_result(this, MachineRepresentation::kTagged);
-
- Label out(this), runtime(this, Label::kDeferred);
-
- // Let runtime handle non-one-byte {needle_char}.
-
- Node* const one_byte_char_mask = Int32Constant(0xFF);
- GotoUnless(
- Word32Equal(Word32And(needle_char, one_byte_char_mask), needle_char),
- &runtime);
-
- // TODO(jgruber): Handle external and two-byte strings.
-
- Node* const one_byte_seq_mask = Int32Constant(
- kIsIndirectStringMask | kExternalStringTag | kStringEncodingMask);
- Node* const expected_masked = Int32Constant(kOneByteStringTag);
-
- Node* const string_instance_type = LoadInstanceType(string);
- GotoUnless(Word32Equal(Word32And(string_instance_type, one_byte_seq_mask),
- expected_masked),
- &runtime);
-
- // If we reach this, {string} is a non-indirect, non-external one-byte string.
-
- Node* const length = LoadStringLength(string);
- Node* const search_range_length = SmiUntag(SmiSub(length, from));
-
- const int offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
- Node* const begin = IntPtrConstant(offset);
- Node* const cursor = IntPtrAdd(begin, SmiUntag(from));
- Node* const end = IntPtrAdd(cursor, search_range_length);
-
- var_result.Bind(SmiConstant(Smi::FromInt(-1)));
-
- BuildFastLoop(
- MachineType::PointerRepresentation(), cursor, end,
- [this, string, needle_char, begin, &var_result, &out](Node* cursor) {
- Label next(this);
- Node* value = Load(MachineType::Uint8(), string, cursor);
- GotoUnless(Word32Equal(value, needle_char), &next);
-
- // Found a match.
- Node* index = SmiTag(IntPtrSub(cursor, begin));
- var_result.Bind(index);
- Goto(&out);
-
- Bind(&next);
- },
- 1, IndexAdvanceMode::kPost);
- Goto(&out);
-
- Bind(&runtime);
- {
- Node* const pattern = StringFromCharCode(needle_char);
- Node* const result =
- CallRuntime(Runtime::kStringIndexOf, context, string, pattern, from);
- var_result.Bind(result);
- Goto(&out);
- }
-
- Bind(&out);
- return var_result.value();
-}
-
Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
UnicodeEncoding encoding) {
- Variable var_result(this, MachineRepresentation::kTagged);
- var_result.Bind(EmptyStringConstant());
+ Variable var_result(this, MachineRepresentation::kTagged,
+ EmptyStringConstant());
Label if_isword16(this), if_isword32(this), return_result(this);
@@ -3780,7 +3816,7 @@ Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
// Argument isn't smi, check to see if it's a heap-number.
Node* map = LoadMap(argument);
- GotoUnless(IsHeapNumberMap(map), &runtime);
+ GotoIfNot(IsHeapNumberMap(map), &runtime);
// Make a hash from the two 32-bit values of the double.
Node* low =
@@ -3796,15 +3832,15 @@ Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
Node* number_key = LoadFixedArrayElement(number_string_cache, index);
GotoIf(TaggedIsSmi(number_key), &runtime);
map = LoadMap(number_key);
- GotoUnless(IsHeapNumberMap(map), &runtime);
+ GotoIfNot(IsHeapNumberMap(map), &runtime);
// Cache entry's key must match the heap number value we're looking for.
Node* low_compare = LoadObjectField(number_key, HeapNumber::kValueOffset,
MachineType::Int32());
Node* high_compare = LoadObjectField(
number_key, HeapNumber::kValueOffset + kIntSize, MachineType::Int32());
- GotoUnless(Word32Equal(low, low_compare), &runtime);
- GotoUnless(Word32Equal(high, high_compare), &runtime);
+ GotoIfNot(Word32Equal(low, low_compare), &runtime);
+ GotoIfNot(Word32Equal(high, high_compare), &runtime);
// Heap number match, return value from cache entry.
IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
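// Note that both 32-bit halves of the candidate's double must match
// bit-for-bit before the cached string is reused, so values that compare
// equal but differ in representation (e.g. 0.0 vs. -0.0) never alias a
// cache entry.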
@@ -3890,11 +3926,10 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
CSA_ASSERT(this, Word32BinaryNot(IsHeapNumberMap(LoadMap(input))));
// We might need to loop once here due to ToPrimitive conversions.
- Variable var_input(this, MachineRepresentation::kTagged);
+ Variable var_input(this, MachineRepresentation::kTagged, input);
Variable var_result(this, MachineRepresentation::kTagged);
Label loop(this, &var_input);
Label end(this);
- var_input.Bind(input);
Goto(&loop);
Bind(&loop);
{
@@ -3958,8 +3993,8 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
Bind(&if_inputisother);
{
- // The {input} is something else (i.e. Symbol or Simd128Value), let the
- // runtime figure out the correct exception.
+    // The {input} is something else (e.g. a Symbol); let the runtime figure
+    // out the correct exception.
// Note: We cannot tail call to the runtime here, as js-to-wasm
// trampolines also use this code currently, and they declare all
// outgoing parameters as untagged, while we would push a tagged
@@ -3978,7 +4013,7 @@ Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
Label end(this);
Label not_smi(this, Label::kDeferred);
- GotoUnless(TaggedIsSmi(input), &not_smi);
+ GotoIfNot(TaggedIsSmi(input), &not_smi);
var_result.Bind(input);
Goto(&end);
@@ -3986,7 +4021,7 @@ Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
{
Label not_heap_number(this, Label::kDeferred);
Node* input_map = LoadMap(input);
- GotoUnless(IsHeapNumberMap(input_map), &not_heap_number);
+ GotoIfNot(IsHeapNumberMap(input_map), &not_heap_number);
var_result.Bind(input);
Goto(&end);
@@ -4008,8 +4043,7 @@ Node* CodeStubAssembler::ToUint32(Node* context, Node* input) {
Label out(this);
- Variable var_result(this, MachineRepresentation::kTagged);
- var_result.Bind(input);
+ Variable var_result(this, MachineRepresentation::kTagged, input);
// Early exit for positive smis.
{
@@ -4143,45 +4177,6 @@ Node* CodeStubAssembler::ToString(Node* context, Node* input) {
return result.value();
}
-Node* CodeStubAssembler::FlattenString(Node* string) {
- CSA_ASSERT(this, IsString(string));
- Variable var_result(this, MachineRepresentation::kTagged);
- var_result.Bind(string);
-
- Node* instance_type = LoadInstanceType(string);
-
- // Check if the {string} is not a ConsString (i.e. already flat).
- Label is_cons(this, Label::kDeferred), is_flat_in_cons(this), end(this);
- {
- GotoUnless(Word32Equal(Word32And(instance_type,
- Int32Constant(kStringRepresentationMask)),
- Int32Constant(kConsStringTag)),
- &end);
-
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string).
- Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
- Branch(WordEqual(rhs, EmptyStringConstant()), &is_flat_in_cons, &is_cons);
- }
-
- // Bail out to the runtime.
- Bind(&is_cons);
- {
- var_result.Bind(
- CallRuntime(Runtime::kFlattenString, NoContextConstant(), string));
- Goto(&end);
- }
-
- Bind(&is_flat_in_cons);
- {
- var_result.Bind(LoadObjectField(string, ConsString::kFirstOffset));
- Goto(&end);
- }
-
- Bind(&end);
- return var_result.value();
-}
-
Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
Label if_isreceiver(this, Label::kDeferred), if_isnotreceiver(this);
Variable result(this, MachineRepresentation::kTagged);
@@ -4210,9 +4205,8 @@ Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
ToIntegerTruncationMode mode) {
// We might need to loop once for ToNumber conversion.
- Variable var_arg(this, MachineRepresentation::kTagged);
+ Variable var_arg(this, MachineRepresentation::kTagged, input);
Label loop(this, &var_arg), out(this);
- var_arg.Bind(input);
Goto(&loop);
Bind(&loop);
{
@@ -4237,7 +4231,7 @@ Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
Node* arg_value = LoadHeapNumberValue(arg);
// Check if {arg} is NaN.
- GotoUnless(Float64Equal(arg_value, arg_value), &return_zero);
+ GotoIfNot(Float64Equal(arg_value, arg_value), &return_zero);
// Truncate {arg} towards zero.
Node* value = Float64Trunc(arg_value);
@@ -4323,23 +4317,25 @@ void CodeStubAssembler::Use(Label* label) {
void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
Variable* var_index, Label* if_keyisunique,
- Label* if_bailout) {
+ Variable* var_unique, Label* if_bailout) {
DCHECK_EQ(MachineType::PointerRepresentation(), var_index->rep());
+ DCHECK_EQ(MachineRepresentation::kTagged, var_unique->rep());
Comment("TryToName");
- Label if_hascachedindex(this), if_keyisnotindex(this);
+ Label if_hascachedindex(this), if_keyisnotindex(this), if_thinstring(this);
// Handle Smi and HeapNumber keys.
var_index->Bind(TryToIntptr(key, &if_keyisnotindex));
Goto(if_keyisindex);
Bind(&if_keyisnotindex);
Node* key_map = LoadMap(key);
+ var_unique->Bind(key);
// Symbols are unique.
GotoIf(IsSymbolMap(key_map), if_keyisunique);
Node* key_instance_type = LoadMapInstanceType(key_map);
// Miss if |key| is not a String.
STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- GotoUnless(IsStringInstanceType(key_instance_type), if_bailout);
+ GotoIfNot(IsStringInstanceType(key_instance_type), if_bailout);
// |key| is a String. Check if it has a cached array index.
Node* hash = LoadNameHashField(key);
Node* contains_index =
@@ -4350,6 +4346,12 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
Node* not_an_index =
Word32And(hash, Int32Constant(Name::kIsNotArrayIndexMask));
GotoIf(Word32Equal(not_an_index, Int32Constant(0)), if_bailout);
+ // Check if we have a ThinString.
+ GotoIf(Word32Equal(key_instance_type, Int32Constant(THIN_STRING_TYPE)),
+ &if_thinstring);
+ GotoIf(
+ Word32Equal(key_instance_type, Int32Constant(THIN_ONE_BYTE_STRING_TYPE)),
+ &if_thinstring);
// Finally, check if |key| is internalized.
STATIC_ASSERT(kNotInternalizedTag != 0);
Node* not_internalized =
@@ -4357,6 +4359,10 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
GotoIf(Word32NotEqual(not_internalized, Int32Constant(0)), if_bailout);
Goto(if_keyisunique);
+ Bind(&if_thinstring);
+ var_unique->Bind(LoadObjectField(key, ThinString::kActualOffset));
+ Goto(if_keyisunique);
+
Bind(&if_hascachedindex);
var_index->Bind(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash));
Goto(if_keyisindex);
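// Net effect: a ThinString key is transparently replaced in {var_unique} by
// its internalized actual string, so the descriptor and dictionary lookups
// downstream always compare against the canonical internalized name.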
@@ -4466,12 +4472,10 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Node* undefined = UndefinedConstant();
Node* the_hole = mode == kFindExisting ? nullptr : TheHoleConstant();
- Variable var_count(this, MachineType::PointerRepresentation());
- Variable var_entry(this, MachineType::PointerRepresentation());
+ Variable var_count(this, MachineType::PointerRepresentation(), count);
+ Variable var_entry(this, MachineType::PointerRepresentation(), entry);
Variable* loop_vars[] = {&var_count, &var_entry, var_name_index};
Label loop(this, 3, loop_vars);
- var_count.Bind(count);
- var_entry.Bind(entry);
Goto(&loop);
Bind(&loop);
{
@@ -4547,10 +4551,9 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
Node* undefined = UndefinedConstant();
Node* the_hole = TheHoleConstant();
- Variable var_count(this, MachineType::PointerRepresentation());
+ Variable var_count(this, MachineType::PointerRepresentation(), count);
Variable* loop_vars[] = {&var_count, var_entry};
Label loop(this, 2, loop_vars);
- var_count.Bind(count);
var_entry->Bind(entry);
Goto(&loop);
Bind(&loop);
@@ -4617,27 +4620,23 @@ void CodeStubAssembler::InsertEntry<NameDictionary>(Node* dictionary,
Node* enum_index) {
// Store name and value.
StoreFixedArrayElement(dictionary, index, name);
- const int kNameToValueOffset =
- (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
- kPointerSize;
- StoreFixedArrayElement(dictionary, index, value, UPDATE_WRITE_BARRIER,
- kNameToValueOffset);
+ StoreValueByKeyIndex<NameDictionary>(dictionary, index, value);
// Prepare details of the new property.
- Variable var_details(this, MachineRepresentation::kTaggedSigned);
const int kInitialIndex = 0;
PropertyDetails d(kData, NONE, kInitialIndex, PropertyCellType::kNoCell);
enum_index =
SmiShl(enum_index, PropertyDetails::DictionaryStorageField::kShift);
STATIC_ASSERT(kInitialIndex == 0);
- var_details.Bind(SmiOr(SmiConstant(d.AsSmi()), enum_index));
+ Variable var_details(this, MachineRepresentation::kTaggedSigned,
+ SmiOr(SmiConstant(d.AsSmi()), enum_index));
// Private names must be marked non-enumerable.
Label not_private(this, &var_details);
- GotoUnless(IsSymbolMap(LoadMap(name)), &not_private);
+ GotoIfNot(IsSymbolMap(LoadMap(name)), &not_private);
Node* flags = SmiToWord32(LoadObjectField(name, Symbol::kFlagsOffset));
const int kPrivateMask = 1 << Symbol::kPrivateBit;
- GotoUnless(IsSetWord32(flags, kPrivateMask), &not_private);
+ GotoIfNot(IsSetWord32(flags, kPrivateMask), &not_private);
Node* dont_enum =
SmiShl(SmiConstant(DONT_ENUM), PropertyDetails::AttributesField::kShift);
var_details.Bind(SmiOr(var_details.value(), dont_enum));
@@ -4645,11 +4644,8 @@ void CodeStubAssembler::InsertEntry<NameDictionary>(Node* dictionary,
Bind(&not_private);
// Finally, store the details.
- const int kNameToDetailsOffset =
- (NameDictionary::kEntryDetailsIndex - NameDictionary::kEntryKeyIndex) *
- kPointerSize;
- StoreFixedArrayElement(dictionary, index, var_details.value(),
- SKIP_WRITE_BARRIER, kNameToDetailsOffset);
+ StoreDetailsByKeyIndex<NameDictionary>(dictionary, index,
+ var_details.value());
}
template <>
@@ -4707,22 +4703,154 @@ void CodeStubAssembler::DescriptorLookupLinear(Node* unique_name,
Label* if_found,
Variable* var_name_index,
Label* if_not_found) {
+ Comment("DescriptorLookupLinear");
Node* first_inclusive = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
- Node* factor = IntPtrConstant(DescriptorArray::kDescriptorSize);
+ Node* factor = IntPtrConstant(DescriptorArray::kEntrySize);
Node* last_exclusive = IntPtrAdd(first_inclusive, IntPtrMul(nof, factor));
- BuildFastLoop(
- MachineType::PointerRepresentation(), last_exclusive, first_inclusive,
- [this, descriptors, unique_name, if_found,
- var_name_index](Node* name_index) {
- Node* candidate_name = LoadFixedArrayElement(descriptors, name_index);
- var_name_index->Bind(name_index);
- GotoIf(WordEqual(candidate_name, unique_name), if_found);
- },
- -DescriptorArray::kDescriptorSize, IndexAdvanceMode::kPre);
+ BuildFastLoop(last_exclusive, first_inclusive,
+ [this, descriptors, unique_name, if_found,
+ var_name_index](Node* name_index) {
+ Node* candidate_name =
+ LoadFixedArrayElement(descriptors, name_index);
+ var_name_index->Bind(name_index);
+ GotoIf(WordEqual(candidate_name, unique_name), if_found);
+ },
+ -DescriptorArray::kEntrySize, INTPTR_PARAMETERS,
+ IndexAdvanceMode::kPre);
Goto(if_not_found);
}
+Node* CodeStubAssembler::DescriptorArrayNumberOfEntries(Node* descriptors) {
+ return LoadAndUntagToWord32FixedArrayElement(
+ descriptors, IntPtrConstant(DescriptorArray::kDescriptorLengthIndex));
+}
+
+namespace {
+
+Node* DescriptorNumberToIndex(CodeStubAssembler* a, Node* descriptor_number) {
+ Node* descriptor_size = a->Int32Constant(DescriptorArray::kEntrySize);
+ Node* index = a->Int32Mul(descriptor_number, descriptor_size);
+ return a->ChangeInt32ToIntPtr(index);
+}
+
+} // namespace
+
+Node* CodeStubAssembler::DescriptorArrayToKeyIndex(Node* descriptor_number) {
+ return IntPtrAdd(IntPtrConstant(DescriptorArray::ToKeyIndex(0)),
+ DescriptorNumberToIndex(this, descriptor_number));
+}
+
+Node* CodeStubAssembler::DescriptorArrayGetSortedKeyIndex(
+ Node* descriptors, Node* descriptor_number) {
+ const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
+ Node* details = LoadAndUntagToWord32FixedArrayElement(
+ descriptors, DescriptorNumberToIndex(this, descriptor_number),
+ details_offset);
+ return DecodeWord32<PropertyDetails::DescriptorPointer>(details);
+}
+
+Node* CodeStubAssembler::DescriptorArrayGetKey(Node* descriptors,
+ Node* descriptor_number) {
+ const int key_offset = DescriptorArray::ToKeyIndex(0) * kPointerSize;
+ return LoadFixedArrayElement(descriptors,
+ DescriptorNumberToIndex(this, descriptor_number),
+ key_offset);
+}
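// These helpers encode the DescriptorArray layout: entries are
// kEntrySize-slot groups in insertion order starting at ToKeyIndex(0), while
// the hash-sorted order is recovered from the DescriptorPointer bits packed
// into each entry's details word.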
+
+void CodeStubAssembler::DescriptorLookupBinary(Node* unique_name,
+ Node* descriptors, Node* nof,
+ Label* if_found,
+ Variable* var_name_index,
+ Label* if_not_found) {
+ Comment("DescriptorLookupBinary");
+ Variable var_low(this, MachineRepresentation::kWord32, Int32Constant(0));
+ Node* limit =
+ Int32Sub(DescriptorArrayNumberOfEntries(descriptors), Int32Constant(1));
+ Variable var_high(this, MachineRepresentation::kWord32, limit);
+ Node* hash = LoadNameHashField(unique_name);
+ CSA_ASSERT(this, Word32NotEqual(hash, Int32Constant(0)));
+
+ // Assume non-empty array.
+ CSA_ASSERT(this, Uint32LessThanOrEqual(var_low.value(), var_high.value()));
+
+ Variable* loop_vars[] = {&var_high, &var_low};
+ Label binary_loop(this, 2, loop_vars);
+ Goto(&binary_loop);
+ Bind(&binary_loop);
+ {
+ // mid = low + (high - low) / 2 (to avoid overflow in "(low + high) / 2").
+ Node* mid =
+ Int32Add(var_low.value(),
+ Word32Shr(Int32Sub(var_high.value(), var_low.value()), 1));
+ // mid_name = descriptors->GetSortedKey(mid).
+ Node* sorted_key_index = DescriptorArrayGetSortedKeyIndex(descriptors, mid);
+ Node* mid_name = DescriptorArrayGetKey(descriptors, sorted_key_index);
+
+ Node* mid_hash = LoadNameHashField(mid_name);
+
+ Label mid_greater(this), mid_less(this), merge(this);
+ Branch(Uint32GreaterThanOrEqual(mid_hash, hash), &mid_greater, &mid_less);
+ Bind(&mid_greater);
+ {
+ var_high.Bind(mid);
+ Goto(&merge);
+ }
+ Bind(&mid_less);
+ {
+ var_low.Bind(Int32Add(mid, Int32Constant(1)));
+ Goto(&merge);
+ }
+ Bind(&merge);
+ GotoIf(Word32NotEqual(var_low.value(), var_high.value()), &binary_loop);
+ }
+
+ Label scan_loop(this, &var_low);
+ Goto(&scan_loop);
+ Bind(&scan_loop);
+ {
+ GotoIf(Int32GreaterThan(var_low.value(), limit), if_not_found);
+
+ Node* sort_index =
+ DescriptorArrayGetSortedKeyIndex(descriptors, var_low.value());
+ Node* current_name = DescriptorArrayGetKey(descriptors, sort_index);
+ Node* current_hash = LoadNameHashField(current_name);
+ GotoIf(Word32NotEqual(current_hash, hash), if_not_found);
+ Label next(this);
+ GotoIf(WordNotEqual(current_name, unique_name), &next);
+ GotoIf(Int32GreaterThanOrEqual(sort_index, nof), if_not_found);
+ var_name_index->Bind(DescriptorArrayToKeyIndex(sort_index));
+ Goto(if_found);
+
+ Bind(&next);
+ var_low.Bind(Int32Add(var_low.value(), Int32Constant(1)));
+ Goto(&scan_loop);
+ }
+}
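// The loop above is a lower bound over the hash-sorted view, followed by a
// linear scan across the run of equal hashes. A standalone C++ sketch of the
// same search (std::vector stand-in, not the real DescriptorArray API;
// assumes a non-empty vector, matching the CSA_ASSERT above):
//
// #include <cstdint>
// #include <vector>
//
// int LowerBoundByHash(const std::vector<uint32_t>& sorted_hashes,
//                      uint32_t hash) {
//   int low = 0, high = static_cast<int>(sorted_hashes.size()) - 1;
//   while (low != high) {
//     int mid = low + (high - low) / 2;  // avoids overflow in (low + high)
//     if (sorted_hashes[mid] >= hash) high = mid;
//     else low = mid + 1;
//   }
//   return low;  // first index whose hash is >= hash, if one exists;
//                // the caller re-checks the hash and scans from here
// }
//
// The extra {sort_index >= nof} check in the scan rejects descriptors that
// are present in the shared DescriptorArray but are not own descriptors of
// the map being searched.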
+
+void CodeStubAssembler::DescriptorLookup(Node* unique_name, Node* descriptors,
+ Node* bitfield3, Label* if_found,
+ Variable* var_name_index,
+ Label* if_not_found) {
+ Comment("DescriptorArrayLookup");
+ Node* nof = DecodeWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+ GotoIf(Word32Equal(nof, Int32Constant(0)), if_not_found);
+ Label linear_search(this), binary_search(this);
+ const int kMaxElementsForLinearSearch = 32;
+ Branch(Int32LessThanOrEqual(nof, Int32Constant(kMaxElementsForLinearSearch)),
+ &linear_search, &binary_search);
+ Bind(&linear_search);
+ {
+ DescriptorLookupLinear(unique_name, descriptors, ChangeInt32ToIntPtr(nof),
+ if_found, var_name_index, if_not_found);
+ }
+ Bind(&binary_search);
+ {
+ DescriptorLookupBinary(unique_name, descriptors, nof, if_found,
+ var_name_index, if_not_found);
+ }
+}
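// The cutoff trades the binary search's extra sorted-key loads against the
// linear walk's simplicity; for the common case of small maps, the plain
// scan over at most kMaxElementsForLinearSearch keys is cheaper.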
+
void CodeStubAssembler::TryLookupProperty(
Node* object, Node* map, Node* instance_type, Node* unique_name,
Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
@@ -4748,20 +4876,11 @@ void CodeStubAssembler::TryLookupProperty(
&if_isfastmap);
Bind(&if_isfastmap);
{
- Comment("DescriptorArrayLookup");
- Node* nof =
- DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3);
- // Bail out to the runtime for large numbers of own descriptors. The stub
- // only does linear search, which becomes too expensive in that case.
- {
- static const int32_t kMaxLinear = 210;
- GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), if_bailout);
- }
Node* descriptors = LoadMapDescriptors(map);
var_meta_storage->Bind(descriptors);
- DescriptorLookupLinear(unique_name, descriptors, nof, if_found_fast,
- var_name_index, if_not_found);
+ DescriptorLookup(unique_name, descriptors, bit_field3, if_found_fast,
+ var_name_index, if_not_found);
}
Bind(&if_isslowmap);
{
@@ -4774,8 +4893,8 @@ void CodeStubAssembler::TryLookupProperty(
Bind(&if_objectisspecial);
{
// Handle global object here and other special objects in runtime.
- GotoUnless(Word32Equal(instance_type, Int32Constant(JS_GLOBAL_OBJECT_TYPE)),
- if_bailout);
+ GotoIfNot(Word32Equal(instance_type, Int32Constant(JS_GLOBAL_OBJECT_TYPE)),
+ if_bailout);
// Handle interceptors and access checks in runtime.
Node* bit_field = LoadMapBitField(map);
@@ -4826,15 +4945,8 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
Comment("[ LoadPropertyFromFastObject");
- const int name_to_details_offset =
- (DescriptorArray::kDescriptorDetails - DescriptorArray::kDescriptorKey) *
- kPointerSize;
- const int name_to_value_offset =
- (DescriptorArray::kDescriptorValue - DescriptorArray::kDescriptorKey) *
- kPointerSize;
-
- Node* details = LoadAndUntagToWord32FixedArrayElement(descriptors, name_index,
- name_to_details_offset);
+ Node* details =
+ LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
var_details->Bind(details);
Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
@@ -4917,9 +5029,8 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
}
Bind(&if_in_descriptor);
{
- Node* value =
- LoadFixedArrayElement(descriptors, name_index, name_to_value_offset);
- var_value->Bind(value);
+ var_value->Bind(
+ LoadValueByKeyIndex<DescriptorArray>(descriptors, name_index));
Goto(&done);
}
Bind(&done);
@@ -4933,19 +5044,10 @@ void CodeStubAssembler::LoadPropertyFromNameDictionary(Node* dictionary,
Variable* var_value) {
Comment("LoadPropertyFromNameDictionary");
CSA_ASSERT(this, IsDictionary(dictionary));
- const int name_to_details_offset =
- (NameDictionary::kEntryDetailsIndex - NameDictionary::kEntryKeyIndex) *
- kPointerSize;
- const int name_to_value_offset =
- (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
- kPointerSize;
-
- Node* details = LoadAndUntagToWord32FixedArrayElement(dictionary, name_index,
- name_to_details_offset);
- var_details->Bind(details);
- var_value->Bind(
- LoadFixedArrayElement(dictionary, name_index, name_to_value_offset));
+ var_details->Bind(
+ LoadDetailsByKeyIndex<NameDictionary>(dictionary, name_index));
+ var_value->Bind(LoadValueByKeyIndex<NameDictionary>(dictionary, name_index));
Comment("] LoadPropertyFromNameDictionary");
}
@@ -4958,12 +5060,8 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
Comment("[ LoadPropertyFromGlobalDictionary");
CSA_ASSERT(this, IsDictionary(dictionary));
- const int name_to_value_offset =
- (GlobalDictionary::kEntryValueIndex - GlobalDictionary::kEntryKeyIndex) *
- kPointerSize;
-
Node* property_cell =
- LoadFixedArrayElement(dictionary, name_index, name_to_value_offset);
+ LoadValueByKeyIndex<GlobalDictionary>(dictionary, name_index);
Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(value, TheHoleConstant()), if_deleted);
@@ -4983,8 +5081,7 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
Node* context, Node* receiver,
Label* if_bailout) {
- Variable var_value(this, MachineRepresentation::kTagged);
- var_value.Bind(value);
+ Variable var_value(this, MachineRepresentation::kTagged, value);
Label done(this);
Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
@@ -5007,7 +5104,7 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
// Return undefined if the {getter} is not callable.
var_value.Bind(UndefinedConstant());
- GotoUnless(IsCallableMap(getter_map), &done);
+ GotoIfNot(IsCallableMap(getter_map), &done);
// Call the accessor.
Callable callable = CodeFactory::Call(isolate());
@@ -5123,7 +5220,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
Node* elements = LoadElements(object);
Node* length = LoadAndUntagFixedArrayBaseLength(elements);
- GotoUnless(UintPtrLessThan(intptr_index, length), &if_oob);
+ GotoIfNot(UintPtrLessThan(intptr_index, length), &if_oob);
Node* element = LoadFixedArrayElement(elements, intptr_index);
Node* the_hole = TheHoleConstant();
@@ -5134,7 +5231,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
Node* elements = LoadElements(object);
Node* length = LoadAndUntagFixedArrayBaseLength(elements);
- GotoUnless(UintPtrLessThan(intptr_index, length), &if_oob);
+ GotoIfNot(UintPtrLessThan(intptr_index, length), &if_oob);
// Check if the element is a double hole, but don't load it.
LoadFixedDoubleArrayElement(elements, intptr_index, MachineType::None(), 0,
@@ -5206,22 +5303,22 @@ void CodeStubAssembler::TryPrototypeChainLookup(
}
Variable var_index(this, MachineType::PointerRepresentation());
+ Variable var_unique(this, MachineRepresentation::kTagged);
Label if_keyisindex(this), if_iskeyunique(this);
- TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, if_bailout);
+ TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_unique,
+ if_bailout);
Bind(&if_iskeyunique);
{
- Variable var_holder(this, MachineRepresentation::kTagged);
- Variable var_holder_map(this, MachineRepresentation::kTagged);
- Variable var_holder_instance_type(this, MachineRepresentation::kWord32);
+ Variable var_holder(this, MachineRepresentation::kTagged, receiver);
+ Variable var_holder_map(this, MachineRepresentation::kTagged, map);
+ Variable var_holder_instance_type(this, MachineRepresentation::kWord32,
+ instance_type);
Variable* merged_variables[] = {&var_holder, &var_holder_map,
&var_holder_instance_type};
Label loop(this, arraysize(merged_variables), merged_variables);
- var_holder.Bind(receiver);
- var_holder_map.Bind(map);
- var_holder_instance_type.Bind(instance_type);
Goto(&loop);
Bind(&loop);
{
@@ -5230,8 +5327,8 @@ void CodeStubAssembler::TryPrototypeChainLookup(
Label next_proto(this);
lookup_property_in_holder(receiver, var_holder.value(), holder_map,
- holder_instance_type, key, &next_proto,
- if_bailout);
+ holder_instance_type, var_unique.value(),
+ &next_proto, if_bailout);
Bind(&next_proto);
// Bailout if it can be an integer indexed exotic case.
@@ -5256,16 +5353,14 @@ void CodeStubAssembler::TryPrototypeChainLookup(
}
Bind(&if_keyisindex);
{
- Variable var_holder(this, MachineRepresentation::kTagged);
- Variable var_holder_map(this, MachineRepresentation::kTagged);
- Variable var_holder_instance_type(this, MachineRepresentation::kWord32);
+ Variable var_holder(this, MachineRepresentation::kTagged, receiver);
+ Variable var_holder_map(this, MachineRepresentation::kTagged, map);
+ Variable var_holder_instance_type(this, MachineRepresentation::kWord32,
+ instance_type);
Variable* merged_variables[] = {&var_holder, &var_holder_map,
&var_holder_instance_type};
Label loop(this, arraysize(merged_variables), merged_variables);
- var_holder.Bind(receiver);
- var_holder_map.Bind(map);
- var_holder_instance_type.Bind(instance_type);
Goto(&loop);
Bind(&loop);
{
@@ -5313,10 +5408,10 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
Node* instanceof_cache_map = LoadRoot(Heap::kInstanceofCacheMapRootIndex);
{
Label instanceof_cache_miss(this);
- GotoUnless(WordEqual(instanceof_cache_function, callable),
- &instanceof_cache_miss);
- GotoUnless(WordEqual(instanceof_cache_map, object_map),
- &instanceof_cache_miss);
+ GotoIfNot(WordEqual(instanceof_cache_function, callable),
+ &instanceof_cache_miss);
+ GotoIfNot(WordEqual(instanceof_cache_map, object_map),
+ &instanceof_cache_miss);
var_result.Bind(LoadRoot(Heap::kInstanceofCacheAnswerRootIndex));
Goto(&return_result);
Bind(&instanceof_cache_miss);
@@ -5330,14 +5425,14 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
// Goto runtime if {callable} is not a JSFunction.
Node* callable_instance_type = LoadMapInstanceType(callable_map);
- GotoUnless(
+ GotoIfNot(
Word32Equal(callable_instance_type, Int32Constant(JS_FUNCTION_TYPE)),
&return_runtime);
// Goto runtime if {callable} is not a constructor or has
// a non-instance "prototype".
Node* callable_bitfield = LoadMapBitField(callable_map);
- GotoUnless(
+ GotoIfNot(
Word32Equal(Word32And(callable_bitfield,
Int32Constant((1 << Map::kHasNonInstancePrototype) |
(1 << Map::kIsConstructor))),
@@ -5348,9 +5443,9 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
Node* callable_prototype =
LoadObjectField(callable, JSFunction::kPrototypeOrInitialMapOffset);
{
- Variable var_callable_prototype(this, MachineRepresentation::kTagged);
Label callable_prototype_valid(this);
- var_callable_prototype.Bind(callable_prototype);
+ Variable var_callable_prototype(this, MachineRepresentation::kTagged,
+ callable_prototype);
// Resolve the "prototype" if the {callable} has an initial map. Afterwards
// the {callable_prototype} will be either the JSReceiver prototype object
@@ -5358,7 +5453,7 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
// created so far and hence we should return false.
Node* callable_prototype_instance_type =
LoadInstanceType(callable_prototype);
- GotoUnless(
+ GotoIfNot(
Word32Equal(callable_prototype_instance_type, Int32Constant(MAP_TYPE)),
&callable_prototype_valid);
var_callable_prototype.Bind(
@@ -5374,8 +5469,7 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
StoreRoot(Heap::kInstanceofCacheMapRootIndex, object_map);
// Loop through the prototype chain looking for the {callable} prototype.
- Variable var_object_map(this, MachineRepresentation::kTagged);
- var_object_map.Bind(object_map);
+ Variable var_object_map(this, MachineRepresentation::kTagged, object_map);
Label loop(this, &var_object_map);
Goto(&loop);
Bind(&loop);
@@ -5384,7 +5478,7 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
// Check if the current {object} needs to be access checked.
Node* object_bitfield = LoadMapBitField(object_map);
- GotoUnless(
+ GotoIfNot(
Word32Equal(Word32And(object_bitfield,
Int32Constant(1 << Map::kIsAccessCheckNeeded)),
Int32Constant(0)),
@@ -5464,8 +5558,8 @@ Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
Node* CodeStubAssembler::LoadFeedbackVectorForStub() {
Node* function =
LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset);
- Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
- return LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
+ Node* cell = LoadObjectField(function, JSFunction::kFeedbackVectorOffset);
+ return LoadObjectField(cell, Cell::kValueOffset);
}
void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* feedback_vector,
@@ -5504,11 +5598,11 @@ Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
Label done(this, &var_intptr_key), key_is_smi(this);
GotoIf(TaggedIsSmi(key), &key_is_smi);
// Try to convert a heap number to a Smi.
- GotoUnless(IsHeapNumberMap(LoadMap(key)), miss);
+ GotoIfNot(IsHeapNumberMap(LoadMap(key)), miss);
{
Node* value = LoadHeapNumberValue(key);
Node* int_value = RoundFloat64ToInt32(value);
- GotoUnless(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss);
+ GotoIfNot(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss);
var_intptr_key.Bind(ChangeInt32ToIntPtr(int_value));
Goto(&done);
}
@@ -5553,7 +5647,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
bool is_load = value == nullptr;
- GotoUnless(TaggedIsSmi(key), bailout);
+ GotoIfNot(TaggedIsSmi(key), bailout);
key = SmiUntag(key);
GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout);
@@ -5686,8 +5780,7 @@ Node* CodeStubAssembler::Int32ToUint8Clamped(Node* int32_value) {
Label done(this);
Node* int32_zero = Int32Constant(0);
Node* int32_255 = Int32Constant(255);
- Variable var_value(this, MachineRepresentation::kWord32);
- var_value.Bind(int32_value);
+ Variable var_value(this, MachineRepresentation::kWord32, int32_value);
GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done);
var_value.Bind(int32_zero);
GotoIf(Int32LessThan(int32_value, int32_zero), &done);
@@ -5699,8 +5792,7 @@ Node* CodeStubAssembler::Int32ToUint8Clamped(Node* int32_value) {
Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) {
Label done(this);
- Variable var_value(this, MachineRepresentation::kWord32);
- var_value.Bind(Int32Constant(0));
+ Variable var_value(this, MachineRepresentation::kWord32, Int32Constant(0));
GotoIf(Float64LessThanOrEqual(float64_value, Float64Constant(0.0)), &done);
var_value.Bind(Int32Constant(255));
GotoIf(Float64LessThanOrEqual(Float64Constant(255.0), float64_value), &done);
@@ -5743,7 +5835,7 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
Label done(this, &var_result), if_smi(this);
GotoIf(TaggedIsSmi(input), &if_smi);
// Try to convert a heap number to a Smi.
- GotoUnless(IsHeapNumberMap(LoadMap(input)), bailout);
+ GotoIfNot(IsHeapNumberMap(LoadMap(input)), bailout);
{
Node* value = LoadHeapNumberValue(input);
if (rep == MachineRepresentation::kWord32) {
@@ -5821,12 +5913,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
// Skip the store if we write beyond the length.
- GotoUnless(IntPtrLessThan(key, length), &done);
+ GotoIfNot(IntPtrLessThan(key, length), &done);
// ... but bailout if the key is negative.
} else {
DCHECK_EQ(STANDARD_STORE, store_mode);
}
- GotoUnless(UintPtrLessThan(key, length), bailout);
+ GotoIfNot(UintPtrLessThan(key, length), bailout);
// Backing store = external_pointer + base_pointer.
Node* external_pointer =
@@ -5853,7 +5945,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// a smi before manipulating the backing store. Otherwise the backing store
// may be left in an invalid state.
if (IsFastSmiElementsKind(elements_kind)) {
- GotoUnless(TaggedIsSmi(value), bailout);
+ GotoIfNot(TaggedIsSmi(value), bailout);
} else if (IsFastDoubleElementsKind(elements_kind)) {
value = TryTaggedToFloat64(value, bailout);
}
@@ -5862,7 +5954,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
elements = CheckForCapacityGrow(object, elements, elements_kind, length,
key, parameter_mode, is_jsarray, bailout);
} else {
- GotoUnless(UintPtrLessThan(key, length), bailout);
+ GotoIfNot(UintPtrLessThan(key, length), bailout);
if ((store_mode == STORE_NO_TRANSITION_HANDLE_COW) &&
IsFastSmiOrObjectElementsKind(elements_kind)) {
@@ -5917,7 +6009,7 @@ Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
Bind(&no_grow_case);
{
- GotoUnless(UintPtrLessThan(key, length), bailout);
+ GotoIfNot(UintPtrLessThan(key, length), bailout);
checked_elements.Bind(elements);
Goto(&done);
}
@@ -5930,11 +6022,10 @@ Node* CodeStubAssembler::CopyElementsOnWrite(Node* object, Node* elements,
ElementsKind kind, Node* length,
ParameterMode mode,
Label* bailout) {
- Variable new_elements_var(this, MachineRepresentation::kTagged);
+ Variable new_elements_var(this, MachineRepresentation::kTagged, elements);
Label done(this);
- new_elements_var.Bind(elements);
- GotoUnless(
+ GotoIfNot(
WordEqual(LoadMap(elements), LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
&done);
{
@@ -6062,11 +6153,10 @@ Node* CodeStubAssembler::EnumLength(Node* map) {
void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
Label* use_runtime) {
- Variable current_js_object(this, MachineRepresentation::kTagged);
- current_js_object.Bind(receiver);
+ Variable current_js_object(this, MachineRepresentation::kTagged, receiver);
- Variable current_map(this, MachineRepresentation::kTagged);
- current_map.Bind(LoadMap(current_js_object.value()));
+ Variable current_map(this, MachineRepresentation::kTagged,
+ LoadMap(current_js_object.value()));
// These variables are updated in the loop below.
Variable* loop_vars[2] = {&current_js_object, &current_map};
@@ -6184,14 +6274,16 @@ Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
return cell;
}
-void CodeStubAssembler::BuildFastLoop(
- const CodeStubAssembler::VariableList& vars,
- MachineRepresentation index_rep, Node* start_index, Node* end_index,
- const FastLoopBody& body, int increment, IndexAdvanceMode mode) {
- Variable var(this, index_rep);
+Node* CodeStubAssembler::BuildFastLoop(
+ const CodeStubAssembler::VariableList& vars, Node* start_index,
+ Node* end_index, const FastLoopBody& body, int increment,
+ ParameterMode parameter_mode, IndexAdvanceMode advance_mode) {
+ MachineRepresentation index_rep = (parameter_mode == INTPTR_PARAMETERS)
+ ? MachineType::PointerRepresentation()
+ : MachineRepresentation::kTaggedSigned;
+ Variable var(this, index_rep, start_index);
VariableList vars_copy(vars, zone());
vars_copy.Add(&var, zone());
- var.Bind(start_index);
Label loop(this, vars_copy);
Label after_loop(this);
// Introduce an explicit second check of the termination condition before the
@@ -6204,20 +6296,22 @@ void CodeStubAssembler::BuildFastLoop(
Branch(WordEqual(var.value(), end_index), &after_loop, &loop);
Bind(&loop);
{
- if (mode == IndexAdvanceMode::kPre) {
- Increment(var, increment);
+ if (advance_mode == IndexAdvanceMode::kPre) {
+ Increment(var, increment, parameter_mode);
}
body(var.value());
- if (mode == IndexAdvanceMode::kPost) {
- Increment(var, increment);
+ if (advance_mode == IndexAdvanceMode::kPost) {
+ Increment(var, increment, parameter_mode);
}
Branch(WordNotEqual(var.value(), end_index), &loop, &after_loop);
}
Bind(&after_loop);
+ return var.value();
}
void CodeStubAssembler::BuildFastFixedArrayForEach(
- Node* fixed_array, ElementsKind kind, Node* first_element_inclusive,
+ const CodeStubAssembler::VariableList& vars, Node* fixed_array,
+ ElementsKind kind, Node* first_element_inclusive,
Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
ParameterMode mode, ForEachDirection direction) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
@@ -6260,25 +6354,37 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
int increment = IsFastDoubleElementsKind(kind) ? kDoubleSize : kPointerSize;
BuildFastLoop(
- MachineType::PointerRepresentation(), start, limit,
+ vars, start, limit,
[fixed_array, &body](Node* offset) { body(fixed_array, offset); },
direction == ForEachDirection::kReverse ? -increment : increment,
+ INTPTR_PARAMETERS,
direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost);
}
+void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace(
+ Node* element_count, Label* doesnt_fit, int base_size, ParameterMode mode) {
+ int max_newspace_parameters =
+ (kMaxRegularHeapObjectSize - base_size) / kPointerSize;
+ GotoIf(IntPtrOrSmiGreaterThan(
+ element_count, IntPtrOrSmiConstant(max_newspace_parameters, mode),
+ mode),
+ doesnt_fit);
+}
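// Worked example of the bound, under illustrative assumptions (64-bit build,
// kPointerSize == 8, base_size == FixedArray::kHeaderSize == 16, and a
// hypothetical 512 KB regular-object limit): (524288 - 16) / 8 == 65534, so
// any element_count above 65534 jumps to {doesnt_fit} and the caller can
// fall back to a runtime allocation.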
+
void CodeStubAssembler::InitializeFieldsWithRoot(
Node* object, Node* start_offset, Node* end_offset,
Heap::RootListIndex root_index) {
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
Node* root_value = LoadRoot(root_index);
- BuildFastLoop(MachineType::PointerRepresentation(), end_offset, start_offset,
+ BuildFastLoop(end_offset, start_offset,
[this, object, root_value](Node* current) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, object,
current, root_value);
},
- -kPointerSize, CodeStubAssembler::IndexAdvanceMode::kPre);
+ -kPointerSize, INTPTR_PARAMETERS,
+ CodeStubAssembler::IndexAdvanceMode::kPre);
}
void CodeStubAssembler::BranchIfNumericRelationalComparison(
@@ -6405,12 +6511,10 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
// We might need to loop several times due to ToPrimitive and/or ToNumber
// conversions.
- Variable var_lhs(this, MachineRepresentation::kTagged),
- var_rhs(this, MachineRepresentation::kTagged);
+ Variable var_lhs(this, MachineRepresentation::kTagged, lhs),
+ var_rhs(this, MachineRepresentation::kTagged, rhs);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
Label loop(this, 2, loop_vars);
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
Goto(&loop);
Bind(&loop);
{
@@ -6716,8 +6820,6 @@ void GenerateEqual_Same(CodeStubAssembler* assembler, Node* value,
// In case of abstract or strict equality checks, we need additional checks
// for NaN values because they are not considered equal, even if both the
// left and the right hand side reference exactly the same value.
- // TODO(bmeurer): This seems to violate the SIMD.js specification, but it
- // seems to be what is tested in the current SIMD.js testsuite.
typedef CodeStubAssembler::Label Label;
@@ -6752,15 +6854,6 @@ void GenerateEqual_Same(CodeStubAssembler* assembler, Node* value,
assembler->Bind(&if_valueissmi);
assembler->Goto(if_equal);
}
-
-void GenerateEqual_Simd128Value_HeapObject(
- CodeStubAssembler* assembler, Node* lhs, Node* lhs_map, Node* rhs,
- Node* rhs_map, CodeStubAssembler::Label* if_equal,
- CodeStubAssembler::Label* if_notequal) {
- assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
- if_notequal);
-}
-
} // namespace
// ES6 section 7.2.12 Abstract Equality Comparison
@@ -6782,12 +6875,10 @@ Node* CodeStubAssembler::Equal(ResultMode mode, Node* lhs, Node* rhs,
// We might need to loop several times due to ToPrimitive and/or ToNumber
// conversions.
- Variable var_lhs(this, MachineRepresentation::kTagged),
- var_rhs(this, MachineRepresentation::kTagged);
+ Variable var_lhs(this, MachineRepresentation::kTagged, lhs),
+ var_rhs(this, MachineRepresentation::kTagged, rhs);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
Label loop(this, 2, loop_vars);
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
Goto(&loop);
Bind(&loop);
{
@@ -6920,8 +7011,8 @@ Node* CodeStubAssembler::Equal(ResultMode mode, Node* lhs, Node* rhs,
Bind(&if_rhsisnotsmi);
{
Label if_lhsisstring(this), if_lhsisnumber(this),
- if_lhsissymbol(this), if_lhsissimd128value(this),
- if_lhsisoddball(this), if_lhsisreceiver(this);
+ if_lhsissymbol(this), if_lhsisoddball(this),
+ if_lhsisreceiver(this);
// Both {lhs} and {rhs} are HeapObjects, load their maps
// and their instance types.
@@ -6933,7 +7024,7 @@ Node* CodeStubAssembler::Equal(ResultMode mode, Node* lhs, Node* rhs,
Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
// Dispatch based on the instance type of {lhs}.
- size_t const kNumCases = FIRST_NONSTRING_TYPE + 4;
+ size_t const kNumCases = FIRST_NONSTRING_TYPE + 3;
Label* case_labels[kNumCases];
int32_t case_values[kNumCases];
for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
@@ -6944,10 +7035,8 @@ Node* CodeStubAssembler::Equal(ResultMode mode, Node* lhs, Node* rhs,
case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
case_labels[FIRST_NONSTRING_TYPE + 1] = &if_lhsissymbol;
case_values[FIRST_NONSTRING_TYPE + 1] = SYMBOL_TYPE;
- case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsissimd128value;
- case_values[FIRST_NONSTRING_TYPE + 2] = SIMD128_VALUE_TYPE;
- case_labels[FIRST_NONSTRING_TYPE + 3] = &if_lhsisoddball;
- case_values[FIRST_NONSTRING_TYPE + 3] = ODDBALL_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsisoddball;
+ case_values[FIRST_NONSTRING_TYPE + 2] = ODDBALL_TYPE;
Switch(lhs_instance_type, &if_lhsisreceiver, case_values, case_labels,
arraysize(case_values));
for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
@@ -7131,47 +7220,6 @@ Node* CodeStubAssembler::Equal(ResultMode mode, Node* lhs, Node* rhs,
}
}
- Bind(&if_lhsissimd128value);
- {
- // Check if the {rhs} is also a Simd128Value.
- Label if_rhsissimd128value(this), if_rhsisnotsimd128value(this);
- Branch(Word32Equal(lhs_instance_type, rhs_instance_type),
- &if_rhsissimd128value, &if_rhsisnotsimd128value);
-
- Bind(&if_rhsissimd128value);
- {
- // Both {lhs} and {rhs} is a Simd128Value.
- GenerateEqual_Simd128Value_HeapObject(
- this, lhs, lhs_map, rhs, rhs_map, &if_equal, &if_notequal);
- }
-
- Bind(&if_rhsisnotsimd128value);
- {
- // Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- Bind(&if_rhsisreceiver);
- {
- // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
- // Swapping {lhs} and {rhs} is not observable and doesn't
- // matter for the result, so we can just swap them and use
- // the JSReceiver handling below (for {lhs} being a JSReceiver).
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- Goto(&loop);
- }
-
- Bind(&if_rhsisnotreceiver);
- {
- // The {rhs} is some other Primitive.
- Goto(&if_notequal);
- }
- }
- }
-
Bind(&if_lhsisreceiver);
{
// Check if the {rhs} is also a JSReceiver.
@@ -7290,10 +7338,6 @@ Node* CodeStubAssembler::StrictEqual(ResultMode mode, Node* lhs, Node* rhs,
// } else {
// return false;
// }
- // } else if (lhs->IsSimd128()) {
- // if (rhs->IsSimd128()) {
- // return %StrictEqual(lhs, rhs);
- // }
// } else {
// return false;
// }
@@ -7327,8 +7371,8 @@ Node* CodeStubAssembler::StrictEqual(ResultMode mode, Node* lhs, Node* rhs,
Bind(&if_notsame);
{
- // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
- // String and Simd128Value they can still be considered equal.
+ // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber
+ // and String they can still be considered equal.
// Check if {lhs} is a Smi or a HeapObject.
Label if_lhsissmi(this), if_lhsisnotsmi(this);
@@ -7427,26 +7471,7 @@ Node* CodeStubAssembler::StrictEqual(ResultMode mode, Node* lhs, Node* rhs,
}
Bind(&if_lhsisnotstring);
- {
- // Check if {lhs} is a Simd128Value.
- Label if_lhsissimd128value(this), if_lhsisnotsimd128value(this);
- Branch(Word32Equal(lhs_instance_type,
- Int32Constant(SIMD128_VALUE_TYPE)),
- &if_lhsissimd128value, &if_lhsisnotsimd128value);
-
- Bind(&if_lhsissimd128value);
- {
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is also a Simd128Value that is equal to {lhs}.
- GenerateEqual_Simd128Value_HeapObject(
- this, lhs, lhs_map, rhs, rhs_map, &if_equal, &if_notequal);
- }
-
- Bind(&if_lhsisnotsimd128value);
- Goto(&if_notequal);
- }
+ Goto(&if_notequal);
}
}
}
@@ -7670,6 +7695,57 @@ Node* CodeStubAssembler::HasProperty(
return result.value();
}
+Node* CodeStubAssembler::ClassOf(Node* value) {
+ Variable var_result(this, MachineRepresentation::kTaggedPointer);
+ Label if_function(this, Label::kDeferred), if_object(this, Label::kDeferred),
+ if_primitive(this, Label::kDeferred), return_result(this);
+
+ // Check if {value} is a Smi.
+ GotoIf(TaggedIsSmi(value), &if_primitive);
+
+ Node* value_map = LoadMap(value);
+ Node* value_instance_type = LoadMapInstanceType(value_map);
+
+ // Check if {value} is a JSFunction or JSBoundFunction.
+ STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
+ GotoIf(Uint32LessThanOrEqual(Int32Constant(FIRST_FUNCTION_TYPE),
+ value_instance_type),
+ &if_function);
+
+ // Check if {value} is a primitive HeapObject.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ GotoIf(Uint32LessThan(value_instance_type,
+ Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+ &if_primitive);
+
+  // Load the {value}'s constructor and check that it's a JSFunction.
+ Node* constructor = LoadMapConstructor(value_map);
+ GotoIfNot(IsJSFunction(constructor), &if_object);
+
+ // Return the instance class name for the {constructor}.
+ Node* shared_info =
+ LoadObjectField(constructor, JSFunction::kSharedFunctionInfoOffset);
+ Node* instance_class_name = LoadObjectField(
+ shared_info, SharedFunctionInfo::kInstanceClassNameOffset);
+ var_result.Bind(instance_class_name);
+ Goto(&return_result);
+
+ Bind(&if_function);
+ var_result.Bind(LoadRoot(Heap::kFunction_stringRootIndex));
+ Goto(&return_result);
+
+ Bind(&if_object);
+ var_result.Bind(LoadRoot(Heap::kObject_stringRootIndex));
+ Goto(&return_result);
+
+ Bind(&if_primitive);
+ var_result.Bind(NullConstant());
+ Goto(&return_result);
+
+ Bind(&return_result);
+ return var_result.value();
+}
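// Decision ladder, top to bottom: Smis and other primitives map to null
// (there is no class to report); instance types at or above
// FIRST_FUNCTION_TYPE map to "Function"; receivers whose map's constructor
// is a JSFunction report that function's instance class name from its
// SharedFunctionInfo; everything else reports "Object".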
+
Node* CodeStubAssembler::Typeof(Node* value, Node* context) {
Variable result_var(this, MachineRepresentation::kTagged);
@@ -7695,20 +7771,13 @@ Node* CodeStubAssembler::Typeof(Node* value, Node* context) {
Int32Constant(1 << Map::kIsCallable)),
&return_function);
- GotoUnless(Word32Equal(callable_or_undetectable_mask, Int32Constant(0)),
- &return_undefined);
+ GotoIfNot(Word32Equal(callable_or_undetectable_mask, Int32Constant(0)),
+ &return_undefined);
GotoIf(IsJSReceiverInstanceType(instance_type), &return_object);
GotoIf(IsStringInstanceType(instance_type), &return_string);
-#define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type) \
- Label return_##type(this); \
- Node* type##_map = HeapConstant(factory()->type##_map()); \
- GotoIf(WordEqual(map, type##_map), &return_##type);
- SIMD128_TYPES(SIMD128_BRANCH)
-#undef SIMD128_BRANCH
-
CSA_ASSERT(this, Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE)));
result_var.Bind(HeapConstant(isolate()->factory()->symbol_string()));
Goto(&return_result);
@@ -7750,15 +7819,6 @@ Node* CodeStubAssembler::Typeof(Node* value, Node* context) {
Goto(&return_result);
}
-#define SIMD128_BIND_RETURN(TYPE, Type, type, lane_count, lane_type) \
- Bind(&return_##type); \
- { \
- result_var.Bind(HeapConstant(isolate()->factory()->type##_string())); \
- Goto(&return_result); \
- }
- SIMD128_TYPES(SIMD128_BIND_RETURN)
-#undef SIMD128_BIND_RETURN
-
Bind(&return_result);
return result_var.value();
}
@@ -7773,16 +7833,16 @@ Node* CodeStubAssembler::GetSuperConstructor(Node* active_function,
Node* map = LoadMap(active_function);
Node* prototype = LoadMapPrototype(map);
Node* prototype_map = LoadMap(prototype);
- GotoUnless(IsConstructorMap(prototype_map), &is_not_constructor);
+ GotoIfNot(IsConstructorMap(prototype_map), &is_not_constructor);
result.Bind(prototype);
Goto(&out);
Bind(&is_not_constructor);
{
- result.Bind(CallRuntime(Runtime::kThrowNotSuperConstructor, context,
- prototype, active_function));
- Goto(&out);
+ CallRuntime(Runtime::kThrowNotSuperConstructor, context, prototype,
+ active_function);
+ Unreachable();
}
Bind(&out);
@@ -7791,33 +7851,86 @@ Node* CodeStubAssembler::GetSuperConstructor(Node* active_function,
Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
Node* context) {
- Label return_runtime(this, Label::kDeferred), end(this);
- Variable result(this, MachineRepresentation::kTagged);
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label if_notcallable(this, Label::kDeferred),
+ if_notreceiver(this, Label::kDeferred), if_otherhandler(this),
+ if_nohandler(this, Label::kDeferred), return_true(this),
+ return_false(this), return_result(this, &var_result);
+
+ // Ensure that the {callable} is actually a JSReceiver.
+ GotoIf(TaggedIsSmi(callable), &if_notreceiver);
+ GotoIfNot(IsJSReceiver(callable), &if_notreceiver);
+
+ // Load the @@hasInstance property from {callable}.
+ Node* inst_of_handler = CallStub(CodeFactory::GetProperty(isolate()), context,
+ callable, HasInstanceSymbolConstant());
+
+ // Optimize for the likely case where {inst_of_handler} is the builtin
+ // Function.prototype[@@hasInstance] method, and emit a direct call in
+ // that case without any additional checking.
+ Node* native_context = LoadNativeContext(context);
+ Node* function_has_instance =
+ LoadContextElement(native_context, Context::FUNCTION_HAS_INSTANCE_INDEX);
+ GotoIfNot(WordEqual(inst_of_handler, function_has_instance),
+ &if_otherhandler);
+ {
+ // Call to Function.prototype[@@hasInstance] directly.
+ Callable builtin(isolate()->builtins()->FunctionPrototypeHasInstance(),
+ CallTrampolineDescriptor(isolate()));
+ Node* result = CallJS(builtin, context, inst_of_handler, callable, object);
+ var_result.Bind(result);
+ Goto(&return_result);
+ }
- // Check if no one installed @@hasInstance somewhere.
- GotoUnless(
- WordEqual(LoadObjectField(LoadRoot(Heap::kHasInstanceProtectorRootIndex),
- PropertyCell::kValueOffset),
- SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
- &return_runtime);
+ Bind(&if_otherhandler);
+ {
+ // Check if there's actually an {inst_of_handler}.
+ GotoIf(IsNull(inst_of_handler), &if_nohandler);
+ GotoIf(IsUndefined(inst_of_handler), &if_nohandler);
- // Check if {callable} is a valid receiver.
- GotoIf(TaggedIsSmi(callable), &return_runtime);
- GotoUnless(IsCallableMap(LoadMap(callable)), &return_runtime);
+ // Call the {inst_of_handler} for {callable} and {object}.
+ Node* result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, inst_of_handler, callable, object);
- // Use the inline OrdinaryHasInstance directly.
- result.Bind(OrdinaryHasInstance(context, callable, object));
- Goto(&end);
+ // Convert the {result} to a Boolean.
+ BranchIfToBooleanIsTrue(result, &return_true, &return_false);
+ }
- // TODO(bmeurer): Use GetPropertyStub here once available.
- Bind(&return_runtime);
+ Bind(&if_nohandler);
{
- result.Bind(CallRuntime(Runtime::kInstanceOf, context, object, callable));
- Goto(&end);
+ // Ensure that the {callable} is actually Callable.
+ GotoIfNot(IsCallable(callable), &if_notcallable);
+
+ // Use the OrdinaryHasInstance algorithm.
+ Node* result = CallStub(CodeFactory::OrdinaryHasInstance(isolate()),
+ context, callable, object);
+ var_result.Bind(result);
+ Goto(&return_result);
}
- Bind(&end);
- return result.value();
+ Bind(&if_notcallable);
+ {
+ CallRuntime(Runtime::kThrowNonCallableInInstanceOfCheck, context);
+ Unreachable();
+ }
+
+ Bind(&if_notreceiver);
+ {
+ CallRuntime(Runtime::kThrowNonObjectInInstanceOfCheck, context);
+ Unreachable();
+ }
+
+ Bind(&return_true);
+ var_result.Bind(TrueConstant());
+ Goto(&return_result);
+
+ Bind(&return_false);
+ var_result.Bind(FalseConstant());
+ Goto(&return_result);
+
+ Bind(&return_result);
+ return var_result.value();
}
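// A rough JS-level model of the dispatch order implemented above, assuming
// standard ES2015 instanceof semantics (names below are illustrative only):
//
//   function InstanceOf(object, callable) {
//     if (callable is not a JSReceiver) throw TypeError;       // if_notreceiver
//     let handler = callable[Symbol.hasInstance];
//     if (handler === Function.prototype[Symbol.hasInstance])
//       return %FunctionHasInstance(callable, object);         // fast path
//     if (handler !== null && handler !== undefined)
//       return ToBoolean(handler.call(callable, object));      // if_otherhandler
//     if (callable is not callable) throw TypeError;           // if_notcallable
//     return OrdinaryHasInstance(callable, object);            // if_nohandler
//   }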
Node* CodeStubAssembler::NumberInc(Node* value) {
@@ -7996,7 +8109,7 @@ Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
// here, and take the slow path if any fail.
Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
- GotoUnless(
+ GotoIfNot(
WordEqual(
LoadObjectField(protector_cell, PropertyCell::kValueOffset),
SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
@@ -8007,13 +8120,13 @@ Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
Node* prototype = LoadMapPrototype(array_map);
Node* array_prototype = LoadContextElement(
native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
- GotoUnless(WordEqual(prototype, array_prototype), &if_isslow);
+ GotoIfNot(WordEqual(prototype, array_prototype), &if_isslow);
Node* map = LoadMap(prototype);
prototype = LoadMapPrototype(map);
Node* object_prototype = LoadContextElement(
native_context, Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
- GotoUnless(WordEqual(prototype, object_prototype), &if_isslow);
+ GotoIfNot(WordEqual(prototype, object_prototype), &if_isslow);
map = LoadMap(prototype);
prototype = LoadMapPrototype(map);
@@ -8100,14 +8213,16 @@ Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
return IsSetWord32<JSArrayBuffer::WasNeutered>(buffer_bit_field);
}
-CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler, Node* argc)
+CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler, Node* argc,
+ Node* fp,
+ CodeStubAssembler::ParameterMode mode)
: assembler_(assembler),
+ argc_mode_(mode),
argc_(argc),
arguments_(nullptr),
- fp_(assembler->LoadFramePointer()) {
- argc_ = assembler->ChangeUint32ToWord(argc_);
+ fp_(fp != nullptr ? fp : assembler->LoadFramePointer()) {
Node* offset = assembler->ElementOffsetFromIndex(
- argc_, FAST_ELEMENTS, CodeStubAssembler::INTPTR_PARAMETERS,
+ argc_, FAST_ELEMENTS, mode,
(StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kPointerSize);
arguments_ = assembler_->IntPtrAdd(fp_, offset);
}
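// A sketch of the layout this arithmetic assumes, for argc == 2 on a 64-bit
// target (kPointerSize == 8; slot counts are illustrative):
//
//   arguments_ + 8 : receiver
//   arguments_ + 0 : argument 0
//   arguments_ - 8 : argument 1
//
// i.e. arguments grow towards lower addresses from arguments_, which itself
// sits (argc + kFixedSlotCountAboveFp - 1) * kPointerSize above the frame
// pointer (see AtIndexPtr below).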
@@ -8117,19 +8232,22 @@ Node* CodeStubArguments::GetReceiver() const {
assembler_->IntPtrConstant(kPointerSize));
}
-Node* CodeStubArguments::AtIndex(Node* index,
- CodeStubAssembler::ParameterMode mode) const {
+Node* CodeStubArguments::AtIndexPtr(
+ Node* index, CodeStubAssembler::ParameterMode mode) const {
typedef compiler::Node Node;
- CSA_ASSERT(assembler_, assembler_->UintPtrLessThan(
- mode == CodeStubAssembler::INTPTR_PARAMETERS
- ? index
- : assembler_->SmiUntag(index),
- GetLength()));
- Node* negated_index =
- assembler_->IntPtrSub(assembler_->IntPtrOrSmiConstant(0, mode), index);
+ Node* negated_index = assembler_->IntPtrOrSmiSub(
+ assembler_->IntPtrOrSmiConstant(0, mode), index, mode);
Node* offset =
assembler_->ElementOffsetFromIndex(negated_index, FAST_ELEMENTS, mode, 0);
- return assembler_->Load(MachineType::AnyTagged(), arguments_, offset);
+ return assembler_->IntPtrAdd(arguments_, offset);
+}
+
+Node* CodeStubArguments::AtIndex(Node* index,
+ CodeStubAssembler::ParameterMode mode) const {
+ DCHECK_EQ(argc_mode_, mode);
+ CSA_ASSERT(assembler_,
+ assembler_->UintPtrOrSmiLessThan(index, GetLength(), mode));
+ return assembler_->Load(MachineType::AnyTagged(), AtIndexPtr(index, mode));
}
Node* CodeStubArguments::AtIndex(int index) const {
@@ -8141,12 +8259,11 @@ void CodeStubArguments::ForEach(
const CodeStubArguments::ForEachBodyFunction& body, Node* first, Node* last,
CodeStubAssembler::ParameterMode mode) {
assembler_->Comment("CodeStubArguments::ForEach");
- DCHECK_IMPLIES(first == nullptr || last == nullptr,
- mode == CodeStubAssembler::INTPTR_PARAMETERS);
if (first == nullptr) {
first = assembler_->IntPtrOrSmiConstant(0, mode);
}
if (last == nullptr) {
+ DCHECK_EQ(mode, argc_mode_);
last = argc_;
}
Node* start = assembler_->IntPtrSub(
@@ -8155,13 +8272,14 @@ void CodeStubArguments::ForEach(
Node* end = assembler_->IntPtrSub(
arguments_,
assembler_->ElementOffsetFromIndex(last, FAST_ELEMENTS, mode));
- assembler_->BuildFastLoop(
- vars, MachineType::PointerRepresentation(), start, end,
- [this, &body](Node* current) {
- Node* arg = assembler_->Load(MachineType::AnyTagged(), current);
- body(arg);
- },
- -kPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
+ assembler_->BuildFastLoop(vars, start, end,
+ [this, &body](Node* current) {
+ Node* arg = assembler_->Load(
+ MachineType::AnyTagged(), current);
+ body(arg);
+ },
+ -kPointerSize, CodeStubAssembler::INTPTR_PARAMETERS,
+ CodeStubAssembler::IndexAdvanceMode::kPost);
}
void CodeStubArguments::PopAndReturn(Node* value) {
@@ -8193,11 +8311,13 @@ Node* CodeStubAssembler::IsDebugActive() {
return Word32NotEqual(is_debug_active, Int32Constant(0));
}
-Node* CodeStubAssembler::IsPromiseHookEnabled() {
- Node* const promise_hook = Load(
- MachineType::Pointer(),
- ExternalConstant(ExternalReference::promise_hook_address(isolate())));
- return WordNotEqual(promise_hook, IntPtrConstant(0));
+Node* CodeStubAssembler::IsPromiseHookEnabledOrDebugIsActive() {
+ Node* const promise_hook_or_debug_is_active =
+ Load(MachineType::Uint8(),
+ ExternalConstant(
+ ExternalReference::promise_hook_or_debug_is_active_address(
+ isolate())));
+ return Word32NotEqual(promise_hook_or_debug_is_active, Int32Constant(0));
}
Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
@@ -8214,8 +8334,8 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(fun, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(fun, JSFunction::kLiteralsOffset,
- Heap::kEmptyLiteralsArrayRootIndex);
+ StoreObjectFieldRoot(fun, JSFunction::kFeedbackVectorOffset,
+ Heap::kUndefinedCellRootIndex);
StoreObjectFieldRoot(fun, JSFunction::kPrototypeOrInitialMapOffset,
Heap::kTheHoleValueRootIndex);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
@@ -8246,12 +8366,48 @@ Node* CodeStubAssembler::AllocatePromiseReactionJobInfo(
StoreObjectFieldNoWriteBarrier(
result, PromiseReactionJobInfo::kDeferredOnRejectOffset,
deferred_on_reject);
- StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kDebugIdOffset,
- SmiConstant(kDebugPromiseNoID));
StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kContextOffset,
context);
return result;
}
+Node* CodeStubAssembler::MarkerIsFrameType(Node* marker_or_function,
+ StackFrame::Type frame_type) {
+  return WordEqual(
+      marker_or_function,
+      IntPtrConstant(StackFrame::TypeToMarker(frame_type)));
+}
+
+Node* CodeStubAssembler::MarkerIsNotFrameType(Node* marker_or_function,
+ StackFrame::Type frame_type) {
+  return WordNotEqual(
+      marker_or_function,
+      IntPtrConstant(StackFrame::TypeToMarker(frame_type)));
+}
+
+void CodeStubAssembler::Print(const char* s) {
+#ifdef DEBUG
+ std::string formatted(s);
+ formatted += "\n";
+ Handle<String> string = isolate()->factory()->NewStringFromAsciiChecked(
+ formatted.c_str(), TENURED);
+ CallRuntime(Runtime::kGlobalPrint, NoContextConstant(), HeapConstant(string));
+#endif
+}
+
+void CodeStubAssembler::Print(const char* prefix, Node* tagged_value) {
+#ifdef DEBUG
+ if (prefix != nullptr) {
+ std::string formatted(prefix);
+ formatted += ": ";
+ Handle<String> string = isolate()->factory()->NewStringFromAsciiChecked(
+ formatted.c_str(), TENURED);
+ CallRuntime(Runtime::kGlobalPrint, NoContextConstant(),
+ HeapConstant(string));
+ }
+ CallRuntime(Runtime::kDebugPrint, NoContextConstant(), tagged_value);
+#endif
+}
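+// Example use of the two helpers above (hypothetical call sites; both
+// helpers are no-ops outside of DEBUG builds):
+//
+//   Print("entering fast path");     // prints the literal string
+//   Print("receiver", receiver);     // prints "receiver: " plus the value
+//   Print(receiver);                 // prints just the value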
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index ee6f348fc9..52e85831c5 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -28,13 +28,16 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(CodeMap, CodeMap) \
V(empty_string, EmptyString) \
V(EmptyFixedArray, EmptyFixedArray) \
- V(EmptyLiteralsArray, EmptyLiteralsArray) \
V(FalseValue, False) \
V(FixedArrayMap, FixedArrayMap) \
V(FixedCOWArrayMap, FixedCOWArrayMap) \
V(FixedDoubleArrayMap, FixedDoubleArrayMap) \
V(FunctionTemplateInfoMap, FunctionTemplateInfoMap) \
+ V(has_instance_symbol, HasInstanceSymbol) \
V(HeapNumberMap, HeapNumberMap) \
+ V(NoClosuresCellMap, NoClosuresCellMap) \
+ V(OneClosureCellMap, OneClosureCellMap) \
+ V(ManyClosuresCellMap, ManyClosuresCellMap) \
V(MinusZeroValue, MinusZero) \
V(NanValue, Nan) \
V(NullValue, Null) \
@@ -77,10 +80,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return Is64() ? INTPTR_PARAMETERS : SMI_PARAMETERS;
}
+ MachineRepresentation ParameterRepresentation(ParameterMode mode) const {
+ return mode == INTPTR_PARAMETERS ? MachineType::PointerRepresentation()
+ : MachineRepresentation::kTaggedSigned;
+ }
+
MachineRepresentation OptimalParameterRepresentation() const {
- return OptimalParameterMode() == INTPTR_PARAMETERS
- ? MachineType::PointerRepresentation()
- : MachineRepresentation::kTaggedSigned;
+ return ParameterRepresentation(OptimalParameterMode());
}
Node* ParameterToWord(Node* value, ParameterMode mode) {
@@ -112,8 +118,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return IntPtrOpName(a, b); \
} \
}
+ PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin)
PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd)
+ PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub)
PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan)
+ PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual,
+ SmiLessThanOrEqual)
PARAMETER_BINOP(IntPtrOrSmiGreaterThan, IntPtrGreaterThan, SmiGreaterThan)
PARAMETER_BINOP(IntPtrOrSmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual,
SmiGreaterThanOrEqual)
@@ -136,6 +146,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
+ bool IsIntPtrOrSmiConstantZero(Node* test);
+
// Round the 32bits payload of the provided word up to the next power of two.
Node* IntPtrRoundUpToPowerOfTwo32(Node* value);
// Select the maximum of the two provided IntPtr values.
@@ -207,6 +219,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
}
SMI_COMPARISON_OP(SmiEqual, WordEqual)
+ SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual)
SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan)
SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual)
SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan)
@@ -294,14 +307,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// otherwise goes to {if_false}.
void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false);
- void BranchIfSimd128Equal(Node* lhs, Node* lhs_map, Node* rhs, Node* rhs_map,
- Label* if_equal, Label* if_notequal);
- void BranchIfSimd128Equal(Node* lhs, Node* rhs, Label* if_equal,
- Label* if_notequal) {
- BranchIfSimd128Equal(lhs, LoadMap(lhs), rhs, LoadMap(rhs), if_equal,
- if_notequal);
- }
-
void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
void BranchIfJSObject(Node* object, Label* if_true, Label* if_false);
@@ -333,6 +338,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load a SMI root, untag it, and convert to Word32.
Node* LoadAndUntagToWord32Root(Heap::RootListIndex root_index);
+  // Tag a value as a Smi and store it.
+ Node* StoreAndTagSmi(Node* base, int offset, Node* value);
+
// Load the floating point value of a HeapNumber.
Node* LoadHeapNumberValue(Node* object);
// Load the Map of a HeapObject.
@@ -377,6 +385,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadMapConstructorFunctionIndex(Node* map);
// Load the constructor of a Map (equivalent to Map::GetConstructor()).
Node* LoadMapConstructor(Node* map);
+ // Loads a value from the specially encoded integer fields in the
+ // SharedFunctionInfo object.
+ // TODO(danno): This currently only works for the integer fields that are
+ // mapped to the upper part of 64-bit words. We should customize
+ // SFI::BodyDescriptor and store int32 values directly.
+ Node* LoadSharedFunctionInfoSpecialField(Node* shared, int offset,
+ ParameterMode param_mode);
+
// Check if the map is set for slow properties.
Node* IsDictionaryMap(Node* map);
@@ -676,8 +692,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsJSGlobalProxy(Node* object);
Node* IsJSReceiverInstanceType(Node* instance_type);
Node* IsJSReceiver(Node* object);
+ Node* IsJSReceiverMap(Node* map);
Node* IsMap(Node* object);
Node* IsCallableMap(Node* map);
+ Node* IsCallable(Node* object);
+ Node* IsBoolean(Node* object);
+ Node* IsHeapNumber(Node* object);
Node* IsName(Node* object);
Node* IsSymbol(Node* object);
Node* IsPrivateSymbol(Node* object);
@@ -710,12 +730,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* StringAdd(Node* context, Node* first, Node* second,
AllocationFlags flags = kNone);
- // Return the first index >= {from} at which {needle_char} was found in
- // {string}, or -1 if such an index does not exist. The returned value is
- // a Smi, {string} is expected to be a String, {needle_char} is an intptr,
- // and {from} is expected to be tagged.
- Node* StringIndexOfChar(Node* context, Node* string, Node* needle_char,
- Node* from);
+ // Unpack the external string, returning a pointer that (offset-wise) looks
+ // like a sequential string.
+ // Note that this pointer is not tagged and does not point to a real
+ // sequential string instance, and may only be used to access the string
+  // data. The pointer is GC-safe as long as a reference to the containing
+  // ExternalString is live.
+  // |string| must be an external string. Bails out for short external strings.
+ Node* TryDerefExternalString(Node* const string, Node* const instance_type,
+ Label* if_bailout);
+
+ // Check if |var_string| has an indirect (thin or flat cons) string type,
+ // and unpack it if so.
+ void MaybeDerefIndirectString(Variable* var_string, Node* instance_type,
+ Variable* var_did_something);
+ // Check if |var_left| or |var_right| has an indirect (thin or flat cons)
+ // string type, and unpack it/them if so. Fall through if nothing was done.
+ void MaybeDerefIndirectStrings(Variable* var_left, Node* left_instance_type,
+ Variable* var_right, Node* right_instance_type,
+ Label* did_something);
Node* StringFromCodePoint(Node* codepoint, UnicodeEncoding encoding);
@@ -731,7 +764,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* ToNumber(Node* context, Node* input);
// Converts |input| to one of 2^32 integer values in the range 0 through
- // 2^32−1, inclusive.
+ // 2^32-1, inclusive.
// ES#sec-touint32
compiler::Node* ToUint32(compiler::Node* context, compiler::Node* input);
@@ -741,9 +774,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Convert any object to a Primitive.
Node* JSReceiverToPrimitive(Node* context, Node* input);
- // Convert a String to a flat String.
- Node* FlattenString(Node* string);
-
enum ToIntegerTruncationMode {
kNoTruncation,
kTruncateMinusZero,
@@ -856,7 +886,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Various building blocks for stubs doing property lookups.
void TryToName(Node* key, Label* if_keyisindex, Variable* var_index,
- Label* if_keyisunique, Label* if_bailout);
+ Label* if_keyisunique, Variable* var_unique,
+ Label* if_bailout);
// Calculates array index for given dictionary entry and entry field.
// See Dictionary::EntryToIndex().
@@ -866,6 +897,49 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* EntryToIndex(Node* entry) {
return EntryToIndex<Dictionary>(entry, Dictionary::kEntryKeyIndex);
}
+
+ // Loads the details for the entry with the given key_index.
+ // Returns an untagged int32.
+ template <class ContainerType>
+ Node* LoadDetailsByKeyIndex(Node* container, Node* key_index) {
+ const int kKeyToDetailsOffset =
+ (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
+ kPointerSize;
+ return LoadAndUntagToWord32FixedArrayElement(container, key_index,
+ kKeyToDetailsOffset);
+ }
+
+ // Loads the value for the entry with the given key_index.
+ // Returns a tagged value.
+ template <class ContainerType>
+ Node* LoadValueByKeyIndex(Node* container, Node* key_index) {
+ const int kKeyToValueOffset =
+ (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
+ kPointerSize;
+ return LoadFixedArrayElement(container, key_index, kKeyToValueOffset);
+ }
+
+ // Stores the details for the entry with the given key_index.
+ // |details| must be a Smi.
+ template <class ContainerType>
+ void StoreDetailsByKeyIndex(Node* container, Node* key_index, Node* details) {
+ const int kKeyToDetailsOffset =
+ (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
+ kPointerSize;
+ StoreFixedArrayElement(container, key_index, details, SKIP_WRITE_BARRIER,
+ kKeyToDetailsOffset);
+ }
+
+ // Stores the value for the entry with the given key_index.
+ template <class ContainerType>
+ void StoreValueByKeyIndex(Node* container, Node* key_index, Node* value) {
+ const int kKeyToValueOffset =
+ (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
+ kPointerSize;
+ StoreFixedArrayElement(container, key_index, value, UPDATE_WRITE_BARRIER,
+ kKeyToValueOffset);
+ }
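+  // For a dictionary whose entries are laid out as [key, value, details]
+  // triples (i.e. kEntryKeyIndex == 0, kEntryValueIndex == 1 and
+  // kEntryDetailsIndex == 2 -- illustrative values), the four helpers above
+  // reduce to fixed-array accesses at key_index + 1 (value) and
+  // key_index + 2 (details), scaled by kPointerSize.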
+
// Calculate a valid size for a hash table.
Node* HashTableComputeCapacity(Node* at_least_space_for);
@@ -930,6 +1004,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_found, Variable* var_value,
Label* if_not_found, Label* if_bailout);
+ Node* GetProperty(Node* context, Node* receiver, Handle<Name> name) {
+ return CallStub(CodeFactory::GetProperty(isolate()), context, receiver,
+ HeapConstant(name));
+ }
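+  // For example (assuming the usual factory accessor for the "length"
+  // string), the helper above makes
+  //   GetProperty(context, receiver, isolate()->factory()->length_string())
+  // behave like the generic JS load "receiver.length", including prototype
+  // walks and accessor calls, by delegating to the GetProperty stub.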
+
void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
Node* name_index, Variable* var_details,
Variable* var_value);
@@ -1059,16 +1138,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
typedef std::function<void(Node* index)> FastLoopBody;
- void BuildFastLoop(const VariableList& var_list,
- MachineRepresentation index_rep, Node* start_index,
- Node* end_index, const FastLoopBody& body, int increment,
- IndexAdvanceMode mode = IndexAdvanceMode::kPre);
-
- void BuildFastLoop(MachineRepresentation index_rep, Node* start_index,
- Node* end_index, const FastLoopBody& body, int increment,
- IndexAdvanceMode mode = IndexAdvanceMode::kPre) {
- BuildFastLoop(VariableList(0, zone()), index_rep, start_index, end_index,
- body, increment, mode);
+ Node* BuildFastLoop(const VariableList& var_list, Node* start_index,
+ Node* end_index, const FastLoopBody& body, int increment,
+ ParameterMode parameter_mode,
+ IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
+
+ Node* BuildFastLoop(Node* start_index, Node* end_index,
+ const FastLoopBody& body, int increment,
+ ParameterMode parameter_mode,
+ IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
+ return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
+ increment, parameter_mode, advance_mode);
}
enum class ForEachDirection { kForward, kReverse };
@@ -1077,11 +1157,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
FastFixedArrayForEachBody;
void BuildFastFixedArrayForEach(
- Node* fixed_array, ElementsKind kind, Node* first_element_inclusive,
+ const CodeStubAssembler::VariableList& vars, Node* fixed_array,
+ ElementsKind kind, Node* first_element_inclusive,
Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
ParameterMode mode = INTPTR_PARAMETERS,
ForEachDirection direction = ForEachDirection::kReverse);
+ void BuildFastFixedArrayForEach(
+ Node* fixed_array, ElementsKind kind, Node* first_element_inclusive,
+ Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+ ParameterMode mode = INTPTR_PARAMETERS,
+ ForEachDirection direction = ForEachDirection::kReverse) {
+ CodeStubAssembler::VariableList list(0, zone());
+ BuildFastFixedArrayForEach(list, fixed_array, kind, first_element_inclusive,
+ last_element_exclusive, body, mode, direction);
+ }
+
Node* GetArrayAllocationSize(Node* element_count, ElementsKind kind,
ParameterMode mode, int header_size) {
return ElementOffsetFromIndex(element_count, kind, mode, header_size);
@@ -1093,6 +1184,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
FixedArray::kHeaderSize);
}
+ void GotoIfFixedArraySizeDoesntFitInNewSpace(Node* element_count,
+ Label* doesnt_fit, int base_size,
+ ParameterMode mode);
+
void InitializeFieldsWithRoot(Node* object, Node* start_offset,
Node* end_offset, Heap::RootListIndex root);
@@ -1130,6 +1225,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Runtime::FunctionId fallback_runtime_function_id = Runtime::kHasProperty);
Node* ForInFilter(Node* key, Node* object, Node* context);
+ Node* ClassOf(Node* object);
+
Node* Typeof(Node* value, Node* context);
Node* GetSuperConstructor(Node* value, Node* context);
@@ -1149,17 +1246,44 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* context);
// Promise helpers
- Node* IsPromiseHookEnabled();
+ Node* IsPromiseHookEnabledOrDebugIsActive();
Node* AllocatePromiseReactionJobInfo(Node* value, Node* tasks,
Node* deferred_promise,
Node* deferred_on_resolve,
Node* deferred_on_reject, Node* context);
+ // Helpers for StackFrame markers.
+ Node* MarkerIsFrameType(Node* marker_or_function,
+ StackFrame::Type frame_type);
+ Node* MarkerIsNotFrameType(Node* marker_or_function,
+ StackFrame::Type frame_type);
+
+ // Support for printf-style debugging
+ void Print(const char* s);
+ void Print(const char* prefix, Node* tagged_value);
+ inline void Print(Node* tagged_value) { return Print(nullptr, tagged_value); }
+
+ template <class... TArgs>
+ Node* MakeTypeError(MessageTemplate::Template message, Node* context,
+ TArgs... args) {
+ STATIC_ASSERT(sizeof...(TArgs) <= 3);
+ Node* const make_type_error = LoadContextElement(
+ LoadNativeContext(context), Context::MAKE_TYPE_ERROR_INDEX);
+ return CallJS(CodeFactory::Call(isolate()), context, make_type_error,
+ UndefinedConstant(), SmiConstant(message), args...);
+ }
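+  // Example (the message template is illustrative):
+  //   Node* error = MakeTypeError(MessageTemplate::kNotConstructor,
+  //                               context, object);
+  // returns the freshly built (not yet thrown) TypeError by calling the
+  // native context's %MakeTypeError% with up to three extra arguments.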
+
protected:
+ void DescriptorLookup(Node* unique_name, Node* descriptors, Node* bitfield3,
+ Label* if_found, Variable* var_name_index,
+ Label* if_not_found);
void DescriptorLookupLinear(Node* unique_name, Node* descriptors, Node* nof,
Label* if_found, Variable* var_name_index,
Label* if_not_found);
+ void DescriptorLookupBinary(Node* unique_name, Node* descriptors, Node* nof,
+ Label* if_found, Variable* var_name_index,
+ Label* if_not_found);
Node* CallGetterIfAccessor(Node* value, Node* details, Node* context,
Node* receiver, Label* if_bailout);
@@ -1198,6 +1322,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* AllocateConsString(Heap::RootListIndex map_root_index, Node* length,
Node* first, Node* second, AllocationFlags flags);
+ // Implements DescriptorArray::number_of_entries.
+ // Returns an untagged int32.
+ Node* DescriptorArrayNumberOfEntries(Node* descriptors);
+ // Implements DescriptorArray::ToKeyIndex.
+ // Returns an untagged IntPtr.
+ Node* DescriptorArrayToKeyIndex(Node* descriptor_number);
+ // Implements DescriptorArray::GetSortedKeyIndex.
+ // Returns an untagged int32.
+ Node* DescriptorArrayGetSortedKeyIndex(Node* descriptors,
+ Node* descriptor_number);
+ // Implements DescriptorArray::GetKey.
+ Node* DescriptorArrayGetKey(Node* descriptors, Node* descriptor_number);
+
static const int kElementLoopUnrollThreshold = 8;
};
@@ -1207,10 +1344,17 @@ class CodeStubArguments {
// |argc| is a uint32 value that specifies the number of arguments passed
// to the builtin, excluding the receiver.
- CodeStubArguments(CodeStubAssembler* assembler, Node* argc);
+ CodeStubArguments(CodeStubAssembler* assembler, Node* argc)
+ : CodeStubArguments(assembler, argc, nullptr,
+ CodeStubAssembler::INTPTR_PARAMETERS) {}
+ CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp,
+ CodeStubAssembler::ParameterMode param_mode);
Node* GetReceiver() const;
+ Node* AtIndexPtr(Node* index, CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS) const;
+
// |index| is zero-based and does not include the receiver
Node* AtIndex(Node* index, CodeStubAssembler::ParameterMode mode =
CodeStubAssembler::INTPTR_PARAMETERS) const;
@@ -1241,6 +1385,7 @@ class CodeStubArguments {
Node* GetArguments();
CodeStubAssembler* assembler_;
+ CodeStubAssembler::ParameterMode argc_mode_;
Node* argc_;
Node* arguments_;
Node* fp_;
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 4fd9309b0e..4c10e20d1e 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -13,6 +13,7 @@
#include "src/crankshaft/lithium.h"
#include "src/field-index.h"
#include "src/ic/ic.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -78,14 +79,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HContext* context() { return context_; }
Isolate* isolate() { return info_->isolate(); }
- HLoadNamedField* BuildLoadNamedField(HValue* object, FieldIndex index);
- void BuildStoreNamedField(HValue* object, HValue* value, FieldIndex index,
- Representation representation,
- bool transition_to_field);
-
- HValue* BuildToString(HValue* input, bool convert);
- HValue* BuildToPrimitive(HValue* input, HValue* input_map);
-
private:
std::unique_ptr<HParameter* []> parameters_;
HValue* arguments_length_;
@@ -326,72 +319,6 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
return code;
}
-
-HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
- HValue* object, FieldIndex index) {
- Representation representation = index.is_double()
- ? Representation::Double()
- : Representation::Tagged();
- int offset = index.offset();
- HObjectAccess access = index.is_inobject()
- ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
- : HObjectAccess::ForBackingStoreOffset(offset, representation);
- if (index.is_double() &&
- (!FLAG_unbox_double_fields || !index.is_inobject())) {
- // Load the heap number.
- object = Add<HLoadNamedField>(
- object, nullptr, access.WithRepresentation(Representation::Tagged()));
- // Load the double value from it.
- access = HObjectAccess::ForHeapNumberValue();
- }
- return Add<HLoadNamedField>(object, nullptr, access);
-}
-
-void CodeStubGraphBuilderBase::BuildStoreNamedField(
- HValue* object, HValue* value, FieldIndex index,
- Representation representation, bool transition_to_field) {
- DCHECK(!index.is_double() || representation.IsDouble());
- int offset = index.offset();
- HObjectAccess access =
- index.is_inobject()
- ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
- : HObjectAccess::ForBackingStoreOffset(offset, representation);
-
- if (representation.IsDouble()) {
- if (!FLAG_unbox_double_fields || !index.is_inobject()) {
- HObjectAccess heap_number_access =
- access.WithRepresentation(Representation::Tagged());
- if (transition_to_field) {
- // The store requires a mutable HeapNumber to be allocated.
- NoObservableSideEffectsScope no_side_effects(this);
- HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
-
- // TODO(hpayer): Allocation site pretenuring support.
- HInstruction* heap_number =
- Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
- MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
- AddStoreMapConstant(heap_number,
- isolate()->factory()->mutable_heap_number_map());
- Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
- value);
- // Store the new mutable heap number into the object.
- access = heap_number_access;
- value = heap_number;
- } else {
- // Load the heap number.
- object = Add<HLoadNamedField>(object, nullptr, heap_number_access);
- // Store the double value into it.
- access = HObjectAccess::ForHeapNumberValue();
- }
- }
- } else if (representation.IsHeapObject()) {
- BuildCheckHeapObject(value);
- }
-
- Add<HStoreNamedField>(object, access, value, INITIALIZING_STORE);
-}
-
-
template <>
HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
ElementsKind const from_kind = casted_stub()->from_kind();
@@ -557,138 +484,6 @@ Handle<Code> BinaryOpWithAllocationSiteStub::GenerateCode() {
}
-HValue* CodeStubGraphBuilderBase::BuildToString(HValue* input, bool convert) {
- if (!convert) return BuildCheckString(input);
- IfBuilder if_inputissmi(this);
- HValue* inputissmi = if_inputissmi.If<HIsSmiAndBranch>(input);
- if_inputissmi.Then();
- {
- // Convert the input smi to a string.
- Push(BuildNumberToString(input, AstType::SignedSmall()));
- }
- if_inputissmi.Else();
- {
- HValue* input_map =
- Add<HLoadNamedField>(input, inputissmi, HObjectAccess::ForMap());
- HValue* input_instance_type = Add<HLoadNamedField>(
- input_map, inputissmi, HObjectAccess::ForMapInstanceType());
- IfBuilder if_inputisstring(this);
- if_inputisstring.If<HCompareNumericAndBranch>(
- input_instance_type, Add<HConstant>(FIRST_NONSTRING_TYPE), Token::LT);
- if_inputisstring.Then();
- {
- // The input is already a string.
- Push(input);
- }
- if_inputisstring.Else();
- {
- // Convert to primitive first (if necessary), see
- // ES6 section 12.7.3 The Addition operator.
- IfBuilder if_inputisprimitive(this);
- STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
- if_inputisprimitive.If<HCompareNumericAndBranch>(
- input_instance_type, Add<HConstant>(LAST_PRIMITIVE_TYPE), Token::LTE);
- if_inputisprimitive.Then();
- {
- // The input is already a primitive.
- Push(input);
- }
- if_inputisprimitive.Else();
- {
- // Convert the input to a primitive.
- Push(BuildToPrimitive(input, input_map));
- }
- if_inputisprimitive.End();
- // Convert the primitive to a string value.
- HValue* values[] = {Pop()};
- Callable toString = CodeFactory::ToString(isolate());
- Push(AddUncasted<HCallWithDescriptor>(Add<HConstant>(toString.code()), 0,
- toString.descriptor(),
- ArrayVector(values)));
- }
- if_inputisstring.End();
- }
- if_inputissmi.End();
- return Pop();
-}
-
-
-HValue* CodeStubGraphBuilderBase::BuildToPrimitive(HValue* input,
- HValue* input_map) {
- // Get the native context of the caller.
- HValue* native_context = BuildGetNativeContext();
-
- // Determine the initial map of the %ObjectPrototype%.
- HValue* object_function_prototype_map =
- Add<HLoadNamedField>(native_context, nullptr,
- HObjectAccess::ForContextSlot(
- Context::OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX));
-
- // Determine the initial map of the %StringPrototype%.
- HValue* string_function_prototype_map =
- Add<HLoadNamedField>(native_context, nullptr,
- HObjectAccess::ForContextSlot(
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
-
- // Determine the initial map of the String function.
- HValue* string_function = Add<HLoadNamedField>(
- native_context, nullptr,
- HObjectAccess::ForContextSlot(Context::STRING_FUNCTION_INDEX));
- HValue* string_function_initial_map = Add<HLoadNamedField>(
- string_function, nullptr, HObjectAccess::ForPrototypeOrInitialMap());
-
- // Determine the map of the [[Prototype]] of {input}.
- HValue* input_prototype =
- Add<HLoadNamedField>(input_map, nullptr, HObjectAccess::ForPrototype());
- HValue* input_prototype_map =
- Add<HLoadNamedField>(input_prototype, nullptr, HObjectAccess::ForMap());
-
- // For string wrappers (JSValue instances with [[StringData]] internal
- // fields), we can shortcirciut the ToPrimitive if
- //
- // (a) the {input} map matches the initial map of the String function,
- // (b) the {input} [[Prototype]] is the unmodified %StringPrototype% (i.e.
- // no one monkey-patched toString, @@toPrimitive or valueOf), and
- // (c) the %ObjectPrototype% (i.e. the [[Prototype]] of the
- // %StringPrototype%) is also unmodified, that is no one sneaked a
- // @@toPrimitive into the %ObjectPrototype%.
- //
- // If all these assumptions hold, we can just take the [[StringData]] value
- // and return it.
- // TODO(bmeurer): This just repairs a regression introduced by removing the
- // weird (and broken) intrinsic %_IsStringWrapperSafeForDefaultValue, which
-  // was intended to do something similar to this, although less efficient and
- // wrong in the presence of @@toPrimitive. Long-term we might want to move
- // into the direction of having a ToPrimitiveStub that can do common cases
- // while staying in JavaScript land (i.e. not going to C++).
- IfBuilder if_inputisstringwrapper(this);
- if_inputisstringwrapper.If<HCompareObjectEqAndBranch>(
- input_map, string_function_initial_map);
- if_inputisstringwrapper.And();
- if_inputisstringwrapper.If<HCompareObjectEqAndBranch>(
- input_prototype_map, string_function_prototype_map);
- if_inputisstringwrapper.And();
- if_inputisstringwrapper.If<HCompareObjectEqAndBranch>(
- Add<HLoadNamedField>(Add<HLoadNamedField>(input_prototype_map, nullptr,
- HObjectAccess::ForPrototype()),
- nullptr, HObjectAccess::ForMap()),
- object_function_prototype_map);
- if_inputisstringwrapper.Then();
- {
- Push(BuildLoadNamedField(
- input, FieldIndex::ForInObjectOffset(JSValue::kValueOffset)));
- }
- if_inputisstringwrapper.Else();
- {
- // TODO(bmeurer): Add support for fast ToPrimitive conversion using
- // a dedicated ToPrimitiveStub.
- Add<HPushArguments>(input);
- Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kToPrimitive), 1));
- }
- if_inputisstringwrapper.End();
- return Pop();
-}
-
template <>
HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
ToBooleanICStub* stub = casted_stub();
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index cdaa4ec14f..48d24f8cc3 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -6,17 +6,19 @@
#include <sstream>
+#include "src/arguments.h"
#include "src/ast/ast.h"
#include "src/bootstrapper.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
+#include "src/counters.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
-#include "src/ic/accessor-assembler.h"
-#include "src/ic/handler-compiler.h"
+#include "src/heap/heap-inl.h"
#include "src/ic/ic-stats.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/tracing/tracing-category-observer.h"
namespace v8 {
@@ -73,7 +75,7 @@ void CodeStubDescriptor::Initialize(Register stack_parameter_count,
bool CodeStub::FindCodeInCache(Code** code_out) {
UnseededNumberDictionary* stubs = isolate()->heap()->code_stubs();
- int index = stubs->FindEntry(GetKey());
+ int index = stubs->FindEntry(isolate(), GetKey());
if (index != UnseededNumberDictionary::kNotFound) {
*code_out = Code::cast(stubs->ValueAt(index));
return true;
@@ -105,8 +107,7 @@ Code::Flags CodeStub::GetCodeFlags() const {
return Code::ComputeFlags(GetCodeKind(), GetExtraICState());
}
-
-Handle<Code> CodeStub::GetCodeCopy(const Code::FindAndReplacePattern& pattern) {
+Handle<Code> CodeStub::GetCodeCopy(const FindAndReplacePattern& pattern) {
Handle<Code> ic = GetCode();
ic = isolate()->factory()->CopyCode(ic);
ic->FindAndReplace(pattern);
@@ -439,11 +440,6 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
return compiler::CodeAssembler::GenerateCode(&state);
}
-void LoadICProtoArrayStub::GenerateAssembly(CodeAssemblerState* state) const {
- AccessorAssembler::GenerateLoadICProtoArray(
- state, throw_reference_error_if_nonexistent());
-}
-
void ElementsTransitionAndStoreStub::GenerateAssembly(
compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
@@ -494,21 +490,6 @@ void AllocateHeapNumberStub::GenerateAssembly(
assembler.Return(result);
}
-#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Stub::GenerateAssembly( \
- compiler::CodeAssemblerState* state) const { \
- CodeStubAssembler assembler(state); \
- compiler::Node* result = \
- assembler.Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
- compiler::Node* map = assembler.LoadMap(result); \
- assembler.StoreNoWriteBarrier( \
- MachineRepresentation::kTagged, map, \
- assembler.HeapConstant(isolate()->factory()->type##_map())); \
- assembler.Return(result); \
- }
-SIMD128_TYPES(SIMD128_GEN_ASM)
-#undef SIMD128_GEN_ASM
-
void StringLengthStub::GenerateAssembly(
compiler::CodeAssemblerState* state) const {
CodeStubAssembler assembler(state);
@@ -603,8 +584,8 @@ compiler::Node* AddWithFeedbackStub::Generate(CodeStubAssembler* assembler,
Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
- &check_rhsisoddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+ &check_rhsisoddball);
var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
@@ -618,8 +599,8 @@ compiler::Node* AddWithFeedbackStub::Generate(CodeStubAssembler* assembler,
Node* lhs_map = assembler->LoadMap(lhs);
// Check if {lhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
- &if_lhsisnotnumber);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(lhs_map),
+ &if_lhsisnotnumber);
// Check if the {rhs} is Smi.
Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
@@ -639,8 +620,8 @@ compiler::Node* AddWithFeedbackStub::Generate(CodeStubAssembler* assembler,
Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
- &check_rhsisoddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+ &check_rhsisoddball);
var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
@@ -684,8 +665,8 @@ compiler::Node* AddWithFeedbackStub::Generate(CodeStubAssembler* assembler,
assembler->Bind(&if_lhsisnotoddball);
{
// Exit unless {lhs} is a string
- assembler->GotoUnless(assembler->IsStringInstanceType(lhs_instance_type),
- &call_with_any_feedback);
+ assembler->GotoIfNot(assembler->IsStringInstanceType(lhs_instance_type),
+ &call_with_any_feedback);
// Check if the {rhs} is a smi, and exit the string check early if it is.
assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_any_feedback);
@@ -694,8 +675,8 @@ compiler::Node* AddWithFeedbackStub::Generate(CodeStubAssembler* assembler,
// Exit unless {rhs} is a string. Since {lhs} is a string we no longer
// need an Oddball check.
- assembler->GotoUnless(assembler->IsStringInstanceType(rhs_instance_type),
- &call_with_any_feedback);
+ assembler->GotoIfNot(assembler->IsStringInstanceType(rhs_instance_type),
+ &call_with_any_feedback);
var_type_feedback.Bind(
assembler->SmiConstant(BinaryOperationFeedback::kString));
@@ -810,8 +791,8 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
- &check_rhsisoddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+ &check_rhsisoddball);
// Perform a floating point subtraction.
var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
@@ -826,8 +807,8 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
Node* lhs_map = assembler->LoadMap(lhs);
// Check if the {lhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
- &if_lhsisnotnumber);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(lhs_map),
+ &if_lhsisnotnumber);
// Check if the {rhs} is a Smi.
Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
@@ -848,8 +829,8 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
- &check_rhsisoddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+ &check_rhsisoddball);
// Perform a floating point subtraction.
var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
@@ -876,7 +857,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
Node* lhs_is_oddball = assembler->Word32Equal(
lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
- assembler->GotoUnless(lhs_is_oddball, &call_with_any_feedback);
+ assembler->GotoIfNot(lhs_is_oddball, &call_with_any_feedback);
Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
@@ -895,8 +876,8 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
- &check_rhsisoddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+ &check_rhsisoddball);
var_type_feedback.Bind(
assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
@@ -911,7 +892,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
Node* rhs_is_oddball = assembler->Word32Equal(
rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
- assembler->GotoUnless(rhs_is_oddball, &call_with_any_feedback);
+ assembler->GotoIfNot(rhs_is_oddball, &call_with_any_feedback);
var_type_feedback.Bind(
assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
@@ -984,8 +965,8 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
- &check_rhsisoddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+ &check_rhsisoddball);
// Convert {lhs} to a double and multiply it with the value of {rhs}.
var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
@@ -999,8 +980,8 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* lhs_map = assembler->LoadMap(lhs);
// Check if {lhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
- &if_lhsisnotnumber);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(lhs_map),
+ &if_lhsisnotnumber);
// Check if {rhs} is a Smi.
Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
@@ -1020,8 +1001,8 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
- &check_rhsisoddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+ &check_rhsisoddball);
// Both {lhs} and {rhs} are HeapNumbers. Load their values and
// multiply them.
@@ -1049,7 +1030,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
Node* lhs_is_oddball = assembler->Word32Equal(
lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
- assembler->GotoUnless(lhs_is_oddball, &call_with_any_feedback);
+ assembler->GotoIfNot(lhs_is_oddball, &call_with_any_feedback);
assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_oddball_feedback);
@@ -1202,8 +1183,8 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
- &check_divisor_for_oddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
+ &check_divisor_for_oddball);
// Convert {dividend} to a double and divide it with the value of
// {divisor}.
@@ -1217,8 +1198,8 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* dividend_map = assembler->LoadMap(dividend);
// Check if {dividend} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(dividend_map),
- &dividend_is_not_number);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(dividend_map),
+ &dividend_is_not_number);
// Check if {divisor} is a Smi.
Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
@@ -1239,8 +1220,8 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
- &check_divisor_for_oddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
+ &check_divisor_for_oddball);
// Both {dividend} and {divisor} are HeapNumbers. Load their values
// and divide them.
@@ -1268,7 +1249,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
Node* dividend_is_oddball = assembler->Word32Equal(
dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
- assembler->GotoUnless(dividend_is_oddball, &call_with_any_feedback);
+ assembler->GotoIfNot(dividend_is_oddball, &call_with_any_feedback);
assembler->GotoIf(assembler->TaggedIsSmi(divisor),
&call_with_oddball_feedback);
@@ -1363,8 +1344,8 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
- &check_divisor_for_oddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
+ &check_divisor_for_oddball);
// Convert {dividend} to a double and divide it with the value of
// {divisor}.
@@ -1379,8 +1360,8 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* dividend_map = assembler->LoadMap(dividend);
// Check if {dividend} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(dividend_map),
- &dividend_is_not_number);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(dividend_map),
+ &dividend_is_not_number);
// Check if {divisor} is a Smi.
Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
@@ -1401,8 +1382,8 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
- &check_divisor_for_oddball);
+ assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
+ &check_divisor_for_oddball);
// Both {dividend} and {divisor} are HeapNumbers. Load their values
// and divide them.
@@ -1429,7 +1410,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
Node* dividend_is_oddball = assembler->Word32Equal(
dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
- assembler->GotoUnless(dividend_is_oddball, &call_with_any_feedback);
+ assembler->GotoIfNot(dividend_is_oddball, &call_with_any_feedback);
assembler->GotoIf(assembler->TaggedIsSmi(divisor),
&call_with_oddball_feedback);
@@ -1508,23 +1489,6 @@ void SubStringStub::GenerateAssembly(
assembler.Parameter(Descriptor::kContext)));
}
-void LoadApiGetterStub::GenerateAssembly(
- compiler::CodeAssemblerState* state) const {
- typedef compiler::Node Node;
- CodeStubAssembler assembler(state);
- Node* context = assembler.Parameter(Descriptor::kContext);
- Node* receiver = assembler.Parameter(Descriptor::kReceiver);
- // For now we only support receiver_is_holder.
- DCHECK(receiver_is_holder());
- Node* holder = receiver;
- Node* map = assembler.LoadMap(receiver);
- Node* descriptors = assembler.LoadMapDescriptors(map);
- Node* callback = assembler.LoadFixedArrayElement(
- descriptors, DescriptorArray::ToValueIndex(index()));
- assembler.TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
- holder, callback);
-}
-
void StoreGlobalStub::GenerateAssembly(
compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
@@ -1584,7 +1548,7 @@ void StoreGlobalStub::GenerateAssembly(
if (cell_type == PropertyCellType::kConstantType) {
switch (constant_type()) {
case PropertyCellConstantType::kSmi:
- assembler.GotoUnless(assembler.TaggedIsSmi(value), &miss);
+ assembler.GotoIfNot(assembler.TaggedIsSmi(value), &miss);
value_is_smi = true;
break;
case PropertyCellConstantType::kStableMap: {
@@ -1620,11 +1584,6 @@ void StoreGlobalStub::GenerateAssembly(
}
}
-void LoadFieldStub::GenerateAssembly(
- compiler::CodeAssemblerState* state) const {
- AccessorAssembler::GenerateLoadField(state);
-}
-
void KeyedLoadSloppyArgumentsStub::GenerateAssembly(
compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
@@ -1716,9 +1675,11 @@ void StoreInterceptorStub::GenerateAssembly(
Node* receiver = assembler.Parameter(Descriptor::kReceiver);
Node* name = assembler.Parameter(Descriptor::kName);
Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
Node* context = assembler.Parameter(Descriptor::kContext);
assembler.TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
- receiver, name, value);
+ value, slot, vector, receiver, name);
}
void LoadIndexedInterceptorStub::GenerateAssembly(
@@ -1745,59 +1706,207 @@ void LoadIndexedInterceptorStub::GenerateAssembly(
slot, vector);
}
-template<class StateType>
-void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
- // Note: Although a no-op transition is semantically OK, it is hinting at a
- // bug somewhere in our state transition machinery.
- DCHECK(from != to);
- if (V8_LIKELY(!FLAG_ic_stats)) return;
- if (FLAG_ic_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
- auto ic_stats = ICStats::instance();
- ic_stats->Begin();
- ICInfo& ic_info = ic_stats->Current();
- ic_info.type = MajorName(MajorKey());
- ic_info.state = ToString(from);
- ic_info.state += "=>";
- ic_info.state += ToString(to);
- ic_stats->End();
- return;
- }
- OFStream os(stdout);
- os << "[";
- PrintBaseName(os);
- os << ": " << from << "=>" << to << "]" << std::endl;
-}
-
void CallICStub::PrintState(std::ostream& os) const { // NOLINT
- os << state();
+ os << convert_mode() << ", " << tail_call_mode();
}
+void CallICStub::GenerateAssembly(compiler::CodeAssemblerState* state) const {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
-void JSEntryStub::FinishCode(Handle<Code> code) {
- Handle<FixedArray> handler_table =
- code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
- handler_table->set(0, Smi::FromInt(handler_offset_));
- code->set_handler_table(*handler_table);
-}
+ Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* target = assembler.Parameter(Descriptor::kTarget);
+ Node* argc = assembler.Parameter(Descriptor::kActualArgumentsCount);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+
+ // TODO(bmeurer): The slot should actually be an IntPtr, but TurboFan's
+  // SimplifiedLowering cannot deal with the IntPtr machine type properly yet.
+ slot = assembler.ChangeInt32ToIntPtr(slot);
+
+ // Static checks to assert it is safe to examine the type feedback element.
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+  // 0, then the hash is computed, but the 0 bit prevents the field from
+  // appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ // Increment the call count.
+ // TODO(bmeurer): Would it be beneficial to use Int32Add on 64-bit?
+ assembler.Comment("increment call count");
+ Node* call_count =
+ assembler.LoadFixedArrayElement(vector, slot, 1 * kPointerSize);
+ Node* new_count = assembler.SmiAdd(call_count, assembler.SmiConstant(1));
+ // Count is Smi, so we don't need a write barrier.
+ assembler.StoreFixedArrayElement(vector, slot, new_count, SKIP_WRITE_BARRIER,
+ 1 * kPointerSize);
+
+ Label call_function(&assembler), extra_checks(&assembler), call(&assembler);
+
+  // The checks. First, does the function match the recorded monomorphic
+  // target?
+ Node* feedback_element = assembler.LoadFixedArrayElement(vector, slot);
+ Node* feedback_value = assembler.LoadWeakCellValueUnchecked(feedback_element);
+ Node* is_monomorphic = assembler.WordEqual(target, feedback_value);
+ assembler.GotoIfNot(is_monomorphic, &extra_checks);
+
+ // The compare above could have been a SMI/SMI comparison. Guard against
+ // this convincing us that we have a monomorphic JSFunction.
+ Node* is_smi = assembler.TaggedIsSmi(target);
+ assembler.Branch(is_smi, &extra_checks, &call_function);
+
+ assembler.Bind(&call_function);
+ {
+ // Call using CallFunction builtin.
+ Callable callable =
+ CodeFactory::CallFunction(isolate(), convert_mode(), tail_call_mode());
+ assembler.TailCallStub(callable, context, target, argc);
+ }
+
+ assembler.Bind(&extra_checks);
+ {
+ Label check_initialized(&assembler), mark_megamorphic(&assembler),
+ create_allocation_site(&assembler, Label::kDeferred),
+ create_weak_cell(&assembler, Label::kDeferred);
+
+ assembler.Comment("check if megamorphic");
+ // Check if it is a megamorphic target.
+ Node* is_megamorphic = assembler.WordEqual(
+ feedback_element,
+ assembler.HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
+ assembler.GotoIf(is_megamorphic, &call);
+
+ assembler.Comment("check if it is an allocation site");
+ assembler.GotoIfNot(
+ assembler.IsAllocationSiteMap(assembler.LoadMap(feedback_element)),
+ &check_initialized);
+
+ // If it is not the Array() function, mark megamorphic.
+ Node* context_slot = assembler.LoadContextElement(
+ assembler.LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
+ Node* is_array_function = assembler.WordEqual(context_slot, target);
+ assembler.GotoIfNot(is_array_function, &mark_megamorphic);
+
+ // Call ArrayConstructorStub.
+ Callable callable = CodeFactory::ArrayConstructor(isolate());
+ assembler.TailCallStub(callable, context, target, target, argc,
+ feedback_element);
+
+ assembler.Bind(&check_initialized);
+ {
+ assembler.Comment("check if uninitialized");
+ // Check if it is uninitialized target first.
+ Node* is_uninitialized = assembler.WordEqual(
+ feedback_element,
+ assembler.HeapConstant(
+ FeedbackVector::UninitializedSentinel(isolate())));
+ assembler.GotoIfNot(is_uninitialized, &mark_megamorphic);
+
+ assembler.Comment("handle unitinitialized");
+ // If it is not a JSFunction mark it as megamorphic.
+ Node* is_smi = assembler.TaggedIsSmi(target);
+ assembler.GotoIf(is_smi, &mark_megamorphic);
+
+ // Check if function is an object of JSFunction type.
+ Node* is_js_function = assembler.IsJSFunction(target);
+ assembler.GotoIfNot(is_js_function, &mark_megamorphic);
+
+ // Check if it is the Array() function.
+ Node* context_slot = assembler.LoadContextElement(
+ assembler.LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
+ Node* is_array_function = assembler.WordEqual(context_slot, target);
+ assembler.GotoIf(is_array_function, &create_allocation_site);
+
+ // Check if the function belongs to the same native context.
+ Node* native_context = assembler.LoadNativeContext(
+ assembler.LoadObjectField(target, JSFunction::kContextOffset));
+ Node* is_same_native_context = assembler.WordEqual(
+ native_context, assembler.LoadNativeContext(context));
+ assembler.Branch(is_same_native_context, &create_weak_cell,
+ &mark_megamorphic);
+ }
+ assembler.Bind(&create_weak_cell);
+ {
+ // Wrap the {target} in a WeakCell and remember it.
+ assembler.Comment("create weak cell");
+ assembler.CreateWeakCellInFeedbackVector(vector, assembler.SmiTag(slot),
+ target);
-void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- DCHECK(kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC);
- if (kind() == Code::KEYED_LOAD_IC) {
- descriptor->Initialize(
- FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
+ // Call using CallFunction builtin.
+ assembler.Goto(&call_function);
+ }
+
+ assembler.Bind(&create_allocation_site);
+ {
+ // Create an AllocationSite for the {target}.
+ assembler.Comment("create allocation site");
+ assembler.CreateAllocationSiteInFeedbackVector(vector,
+ assembler.SmiTag(slot));
+
+ // Call using CallFunction builtin. CallICs have a PREMONOMORPHIC state;
+ // they start collecting feedback only when a call is executed a second
+ // time, so do not pass any feedback here.
+ assembler.Goto(&call_function);
+ }
+
+ assembler.Bind(&mark_megamorphic);
+ {
+ // Mark it as megamorphic.
+ // MegamorphicSentinel is created as part of Heap::InitialObjects and will
+ // not move during a GC, so it is safe to skip the write barrier.
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+ assembler.StoreFixedArrayElement(
+ vector, slot, assembler.HeapConstant(
+ FeedbackVector::MegamorphicSentinel(isolate())),
+ SKIP_WRITE_BARRIER);
+ assembler.Goto(&call);
+ }
+ }
+
+ assembler.Bind(&call);
+ {
+ // Call using the Call builtin.
+ assembler.Comment("call using Call builtin");
+ Callable callable_call =
+ CodeFactory::Call(isolate(), convert_mode(), tail_call_mode());
+ assembler.TailCallStub(callable_call, context, target, argc);
}
}
+void CallICTrampolineStub::PrintState(std::ostream& os) const { // NOLINT
+ os << convert_mode() << ", " << tail_call_mode();
+}
-CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
- if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
- return LoadWithVectorDescriptor(isolate());
- } else {
- DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
- return StoreWithVectorDescriptor(isolate());
- }
+void CallICTrampolineStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
+ typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
+
+ Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* target = assembler.Parameter(Descriptor::kTarget);
+ Node* argc = assembler.Parameter(Descriptor::kActualArgumentsCount);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.LoadFeedbackVectorForStub();
+
+ Callable callable =
+ CodeFactory::CallIC(isolate(), convert_mode(), tail_call_mode());
+ assembler.TailCallStub(callable, context, target, argc, slot, vector);
+}
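+
+// Usage sketch (hypothetical, for illustration only): like any code stub,
+// the trampoline is instantiated with its parameters encoded into the minor
+// key, e.g.
+//   CallICTrampolineStub stub(isolate, ConvertReceiverMode::kAny,
+//                             TailCallMode::kDisallow);
+//   Handle<Code> code = stub.GetCode();
+// The generated code loads the feedback vector for the calling function and
+// tail-calls the full CallIC stub above with it.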
+
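+// JSEntryStub records the offset of its exception handler in a one-entry
+// handler table; the likely purpose (an inference from the code, not stated
+// in the patch) is to let the unwinder find the handler when an exception
+// crosses the JS entry frame.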
+void JSEntryStub::FinishCode(Handle<Code> code) {
+ Handle<FixedArray> handler_table =
+ code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
+ handler_table->set(0, Smi::FromInt(handler_offset_));
+ code->set_handler_table(*handler_table);
}
void TransitionElementsKindStub::InitializeDescriptor(
@@ -1814,15 +1923,6 @@ void AllocateHeapNumberStub::InitializeDescriptor(
}
-#define SIMD128_INIT_DESC(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Stub::InitializeDescriptor( \
- CodeStubDescriptor* descriptor) { \
- descriptor->Initialize( \
- Runtime::FunctionForId(Runtime::kCreate##Type)->entry); \
- }
-SIMD128_TYPES(SIMD128_INIT_DESC)
-#undef SIMD128_INIT_DESC
-
void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
descriptor->SetMissHandler(Runtime::kToBooleanIC_Miss);
@@ -1874,7 +1974,7 @@ void GetPropertyStub::GenerateAssembly(
};
CodeStubAssembler::LookupInHolder lookup_element_in_holder =
- [&assembler, context, &var_result, &end](
+ [&assembler](
Node* receiver, Node* holder, Node* holder_map,
Node* holder_instance_type, Node* index, Label* next_holder,
Label* if_bailout) {
@@ -1915,10 +2015,20 @@ void CreateWeakCellStub::GenerateAheadOfTime(Isolate* isolate) {
stub.GetCode();
}
+void StoreSlowElementStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
+ typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
+
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
-void StoreElementStub::Generate(MacroAssembler* masm) {
- DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind());
- KeyedStoreIC::GenerateSlow(masm);
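+ // Tail-call the slow keyed-store runtime path; the arguments are passed in
+ // the order Runtime_KeyedStoreIC_Slow receives them (value, slot, vector,
+ // receiver, name).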
+ assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Slow, context, value, slot,
+ vector, receiver, name);
}
void StoreFastElementStub::GenerateAssembly(
@@ -1998,15 +2108,12 @@ bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
new_hints |= ToBooleanHint::kHeapNumber;
double value = HeapNumber::cast(*object)->value();
to_boolean_value = value != 0 && !std::isnan(value);
- } else if (object->IsSimd128Value()) {
- new_hints |= ToBooleanHint::kSimdValue;
- to_boolean_value = true;
} else {
// We should never see an internal object at runtime here!
UNREACHABLE();
to_boolean_value = true;
}
- TraceTransition(old_hints, new_hints);
+
set_sub_minor_key(HintsBits::update(sub_minor_key(), new_hints));
return to_boolean_value;
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 0a23062989..fca830c2af 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -8,6 +8,8 @@
#include "src/allocation.h"
#include "src/assembler.h"
#include "src/codegen.h"
+#include "src/factory.h"
+#include "src/find-and-replace-pattern.h"
#include "src/globals.h"
#include "src/ic/ic-state.h"
#include "src/interface-descriptors.h"
@@ -38,20 +40,15 @@ class Node;
V(CEntry) \
V(CompareIC) \
V(DoubleToI) \
- V(FunctionPrototype) \
V(InternalArrayConstructor) \
V(JSEntry) \
- V(LoadIndexedString) \
V(MathPow) \
V(ProfileEntryHook) \
V(RecordWrite) \
V(RegExpExec) \
V(StoreBufferOverflow) \
- V(StoreElement) \
+ V(StoreSlowElement) \
V(SubString) \
- V(FastNewRestParameter) \
- V(FastNewSloppyArguments) \
- V(FastNewStrictArguments) \
V(NameDictionaryLookup) \
/* This can be removed once there are no */ \
/* more deopting Hydrogen stubs. */ \
@@ -71,16 +68,6 @@ class Node;
V(TransitionElementsKind) \
/* --- TurboFanCodeStubs --- */ \
V(AllocateHeapNumber) \
- V(AllocateFloat32x4) \
- V(AllocateInt32x4) \
- V(AllocateUint32x4) \
- V(AllocateBool32x4) \
- V(AllocateInt16x8) \
- V(AllocateUint16x8) \
- V(AllocateBool16x8) \
- V(AllocateInt8x16) \
- V(AllocateUint8x16) \
- V(AllocateBool8x16) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
@@ -102,13 +89,10 @@ class Node;
V(NumberToString) \
V(StringAdd) \
V(GetProperty) \
- V(LoadICProtoArray) \
V(StoreFastElement) \
V(StoreGlobal) \
V(StoreInterceptor) \
- V(LoadApiGetter) \
V(LoadIndexedInterceptor) \
- V(LoadField) \
V(GrowArrayElements)
// List of code stubs used only on 32-bit ARM platforms.
@@ -193,7 +177,7 @@ class CodeStub BASE_EMBEDDED {
Handle<Code> GetCode();
// Retrieve the code for the stub; make and return a copy of the code.
- Handle<Code> GetCodeCopy(const Code::FindAndReplacePattern& pattern);
+ Handle<Code> GetCodeCopy(const FindAndReplacePattern& pattern);
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
@@ -751,69 +735,6 @@ class NumberToStringStub final : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(NumberToString, TurboFanCodeStub);
};
-// TODO(turbofan): This stub should be possible to write in TurboFan
-// using the CodeStubAssembler very soon in a way that is as efficient
-// and easy as the current handwritten version, which is partly a copy
-// of the strict arguments object materialization code.
-class FastNewRestParameterStub final : public PlatformCodeStub {
- public:
- explicit FastNewRestParameterStub(Isolate* isolate,
- bool skip_stub_frame = false)
- : PlatformCodeStub(isolate) {
- minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewRestParameter);
- DEFINE_PLATFORM_CODE_STUB(FastNewRestParameter, PlatformCodeStub);
-
- int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
-
- private:
- class SkipStubFrameBits : public BitField<bool, 0, 1> {};
-};
-
-
-// TODO(turbofan): This stub should be possible to write in TurboFan
-// using the CodeStubAssembler very soon in a way that is as efficient
-// and easy as the current handwritten version.
-class FastNewSloppyArgumentsStub final : public PlatformCodeStub {
- public:
- explicit FastNewSloppyArgumentsStub(Isolate* isolate,
- bool skip_stub_frame = false)
- : PlatformCodeStub(isolate) {
- minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
- }
-
- int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewSloppyArguments);
- DEFINE_PLATFORM_CODE_STUB(FastNewSloppyArguments, PlatformCodeStub);
-
- private:
- class SkipStubFrameBits : public BitField<bool, 0, 1> {};
-};
-
-
-// TODO(turbofan): This stub should be possible to write in TurboFan
-// using the CodeStubAssembler very soon in a way that is as efficient
-// and easy as the current handwritten version.
-class FastNewStrictArgumentsStub final : public PlatformCodeStub {
- public:
- explicit FastNewStrictArgumentsStub(Isolate* isolate,
- bool skip_stub_frame = false)
- : PlatformCodeStub(isolate) {
- minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewStrictArguments);
- DEFINE_PLATFORM_CODE_STUB(FastNewStrictArguments, PlatformCodeStub);
-
- int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
-
- private:
- class SkipStubFrameBits : public BitField<bool, 0, 1> {};
-};
-
class CreateAllocationSiteStub : public TurboFanCodeStub {
public:
explicit CreateAllocationSiteStub(Isolate* isolate)
@@ -867,7 +788,7 @@ class ArrayConstructorStub: public PlatformCodeStub {
void GenerateDispatchToArrayStub(MacroAssembler* masm,
AllocationSiteOverrideMode mode);
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNArgumentsConstructor);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
DEFINE_PLATFORM_CODE_STUB(ArrayConstructor, PlatformCodeStub);
};
@@ -915,98 +836,31 @@ class MathPowStub: public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(MathPow, PlatformCodeStub);
};
-
-class CallICStub: public PlatformCodeStub {
+class CallICStub : public TurboFanCodeStub {
public:
- CallICStub(Isolate* isolate, const CallICState& state)
- : PlatformCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
-
- Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
-
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(minor_key_);
+ CallICStub(Isolate* isolate, ConvertReceiverMode convert_mode,
+ TailCallMode tail_call_mode)
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = ConvertModeBits::encode(convert_mode) |
+ TailCallModeBits::encode(tail_call_mode);
}
protected:
- ConvertReceiverMode convert_mode() const { return state().convert_mode(); }
- TailCallMode tail_call_mode() const { return state().tail_call_mode(); }
-
- CallICState state() const { return CallICState(GetExtraICState()); }
-
- // Code generation helpers.
- void GenerateMiss(MacroAssembler* masm);
- void HandleArrayCase(MacroAssembler* masm, Label* miss);
-
- private:
- void PrintState(std::ostream& os) const override; // NOLINT
+ typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
+ typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
- DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedbackAndVector);
- DEFINE_PLATFORM_CODE_STUB(CallIC, PlatformCodeStub);
-};
-
-
-// TODO(verwaest): Translate to hydrogen code stub.
-class FunctionPrototypeStub : public PlatformCodeStub {
- public:
- explicit FunctionPrototypeStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
-
- // TODO(mvstanton): only the receiver register is accessed. When this is
- // translated to a hydrogen code stub, a new CallInterfaceDescriptor
- // should be created that just uses that register for more efficient code.
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- return LoadWithVectorDescriptor(isolate());
+ ConvertReceiverMode convert_mode() const {
+ return ConvertModeBits::decode(minor_key_);
+ }
+ TailCallMode tail_call_mode() const {
+ return TailCallModeBits::decode(minor_key_);
}
-
- DEFINE_PLATFORM_CODE_STUB(FunctionPrototype, PlatformCodeStub);
-};
-
-
-class LoadIndexedStringStub : public PlatformCodeStub {
- public:
- explicit LoadIndexedStringStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::KEYED_LOAD_IC; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
- DEFINE_PLATFORM_CODE_STUB(LoadIndexedString, PlatformCodeStub);
-};
-
-
-class HandlerStub : public HydrogenCodeStub {
- public:
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return kind(); }
-
- void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
-
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override;
-
- protected:
- explicit HandlerStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
-
- virtual Code::Kind kind() const = 0;
-
- DEFINE_CODE_STUB_BASE(HandlerStub, HydrogenCodeStub);
-};
-
-class LoadFieldStub : public TurboFanCodeStub {
- public:
- explicit LoadFieldStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return GetCodeKind(); }
private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadField);
- DEFINE_TURBOFAN_CODE_STUB(LoadField, TurboFanCodeStub);
+ void PrintState(std::ostream& os) const final; // NOLINT
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CallIC);
+ DEFINE_TURBOFAN_CODE_STUB(CallIC, TurboFanCodeStub);
};
class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
@@ -1041,33 +895,6 @@ class KeyedStoreSloppyArgumentsStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(KeyedStoreSloppyArguments, TurboFanCodeStub);
};
-class LoadApiGetterStub : public TurboFanCodeStub {
- public:
- LoadApiGetterStub(Isolate* isolate, bool receiver_is_holder, int index)
- : TurboFanCodeStub(isolate) {
- // If that's not true, we need to ensure that the receiver is actually a
- // JSReceiver. http://crbug.com/609134
- DCHECK(receiver_is_holder);
- minor_key_ = IndexBits::encode(index) |
- ReceiverIsHolderBits::encode(receiver_is_holder);
- }
-
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
-
- int index() const { return IndexBits::decode(minor_key_); }
- bool receiver_is_holder() const {
- return ReceiverIsHolderBits::decode(minor_key_);
- }
-
- private:
- class ReceiverIsHolderBits : public BitField<bool, 0, 1> {};
- class IndexBits : public BitField<int, 1, kDescriptorIndexBitCount> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
- DEFINE_TURBOFAN_CODE_STUB(LoadApiGetter, TurboFanCodeStub);
-};
-
class StoreGlobalStub : public TurboFanCodeStub {
public:
StoreGlobalStub(Isolate* isolate, PropertyCellType type,
@@ -1094,7 +921,7 @@ class StoreGlobalStub : public TurboFanCodeStub {
Handle<Code> GetCodeCopyFromTemplate(Handle<JSGlobalObject> global,
Handle<PropertyCell> cell) {
- Code::FindAndReplacePattern pattern;
+ FindAndReplacePattern pattern;
if (check_global()) {
pattern.Add(handle(global_map_placeholder(isolate())->map()),
Map::WeakCellForMap(Handle<Map>(global->map())));
@@ -1228,7 +1055,7 @@ class BinaryOpICWithAllocationSiteStub final : public PlatformCodeStub {
static void GenerateAheadOfTime(Isolate* isolate);
Handle<Code> GetCodeCopyFromTemplate(Handle<AllocationSite> allocation_site) {
- Code::FindAndReplacePattern pattern;
+ FindAndReplacePattern pattern;
pattern.Add(isolate()->factory()->undefined_map(), allocation_site);
return CodeStub::GetCodeCopy(pattern);
}
@@ -1513,13 +1340,6 @@ class StringCharCodeAtGenerator {
void GenerateSlow(MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper);
- // Skip handling slow case and directly jump to bailout.
- void SkipSlow(MacroAssembler* masm, Label* bailout) {
- masm->bind(&index_not_smi_);
- masm->bind(&call_runtime_);
- masm->jmp(bailout);
- }
-
private:
Register object_;
Register index_;
@@ -1539,138 +1359,31 @@ class StringCharCodeAtGenerator {
DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
};
-
-// Generates code for creating a one-char string from a char code.
-class StringCharFromCodeGenerator {
- public:
- StringCharFromCodeGenerator(Register code,
- Register result)
- : code_(code),
- result_(result) {
- DCHECK(!code_.is(result_));
- }
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- // Skip handling slow case and directly jump to bailout.
- void SkipSlow(MacroAssembler* masm, Label* bailout) {
- masm->bind(&slow_case_);
- masm->jmp(bailout);
- }
-
- private:
- Register code_;
- Register result_;
-
- Label slow_case_;
- Label exit_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
-};
-
-
-// Generates code implementing String.prototype.charAt.
-//
-// Only supports the case when the receiver is a string and the index
-// is a number (smi or heap number) that is a valid index into the
-// string. Additional index constraints are specified by the
-// flags. Otherwise, bails out to the provided labels.
-//
-// Register usage: |object| may be changed to another string in a way
-// that doesn't affect charCodeAt/charAt semantics, |index| is
-// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
-class StringCharAtGenerator {
- public:
- StringCharAtGenerator(Register object, Register index, Register scratch,
- Register result, Label* receiver_not_string,
- Label* index_not_number, Label* index_out_of_range,
- ReceiverCheckMode check_mode = RECEIVER_IS_UNKNOWN)
- : char_code_at_generator_(object, index, scratch, receiver_not_string,
- index_not_number, index_out_of_range,
- check_mode),
- char_from_code_generator_(scratch, result) {}
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
- }
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm, EmbedMode embed_mode,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, embed_mode, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
- }
-
- // Skip handling slow case and directly jump to bailout.
- void SkipSlow(MacroAssembler* masm, Label* bailout) {
- char_code_at_generator_.SkipSlow(masm, bailout);
- char_from_code_generator_.SkipSlow(masm, bailout);
- }
-
- private:
- StringCharCodeAtGenerator char_code_at_generator_;
- StringCharFromCodeGenerator char_from_code_generator_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
-};
-
-class CallICTrampolineStub : public PlatformCodeStub {
+class CallICTrampolineStub : public TurboFanCodeStub {
public:
- CallICTrampolineStub(Isolate* isolate, const CallICState& state)
- : PlatformCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
-
- Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
-
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(minor_key_);
+ CallICTrampolineStub(Isolate* isolate, ConvertReceiverMode convert_mode,
+ TailCallMode tail_call_mode)
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = ConvertModeBits::encode(convert_mode) |
+ TailCallModeBits::encode(tail_call_mode);
}
protected:
- CallICState state() const {
- return CallICState(static_cast<ExtraICState>(minor_key_));
- }
+ typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
+ typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
- DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedback);
- DEFINE_PLATFORM_CODE_STUB(CallICTrampoline, PlatformCodeStub);
-};
-
-class LoadICProtoArrayStub : public TurboFanCodeStub {
- public:
- explicit LoadICProtoArrayStub(Isolate* isolate,
- bool throw_reference_error_if_nonexistent)
- : TurboFanCodeStub(isolate) {
- minor_key_ = ThrowReferenceErrorIfNonexistentBits::encode(
- throw_reference_error_if_nonexistent);
+ ConvertReceiverMode convert_mode() const {
+ return ConvertModeBits::decode(minor_key_);
}
-
- bool throw_reference_error_if_nonexistent() const {
- return ThrowReferenceErrorIfNonexistentBits::decode(minor_key_);
- }
-
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(minor_key_);
+ TailCallMode tail_call_mode() const {
+ return TailCallModeBits::decode(minor_key_);
}
private:
- class ThrowReferenceErrorIfNonexistentBits : public BitField<bool, 0, 1> {};
+ void PrintState(std::ostream& os) const override; // NOLINT
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadICProtoArray);
- DEFINE_TURBOFAN_CODE_STUB(LoadICProtoArray, TurboFanCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CallICTrampoline);
+ DEFINE_TURBOFAN_CODE_STUB(CallICTrampoline, TurboFanCodeStub);
};
class DoubleToIStub : public PlatformCodeStub {
@@ -1847,21 +1560,6 @@ class AllocateHeapNumberStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
};
-#define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type) \
- class Allocate##Type##Stub : public TurboFanCodeStub { \
- public: \
- explicit Allocate##Type##Stub(Isolate* isolate) \
- : TurboFanCodeStub(isolate) {} \
- \
- void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
- void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
- \
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate##Type); \
- DEFINE_CODE_STUB(Allocate##Type, TurboFanCodeStub); \
- };
-SIMD128_TYPES(SIMD128_ALLOC_STUB)
-#undef SIMD128_ALLOC_STUB
-
class CommonArrayConstructorStub : public TurboFanCodeStub {
protected:
CommonArrayConstructorStub(Isolate* isolate, ElementsKind kind,
@@ -1983,31 +1681,19 @@ class ArrayNArgumentsConstructorStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(ArrayNArgumentsConstructor, PlatformCodeStub);
};
-class StoreElementStub : public PlatformCodeStub {
+class StoreSlowElementStub : public TurboFanCodeStub {
public:
- StoreElementStub(Isolate* isolate, ElementsKind elements_kind,
- KeyedAccessStoreMode mode)
- : PlatformCodeStub(isolate) {
- // TODO(jkummerow): Rename this stub to StoreSlowElementStub,
- // drop elements_kind parameter.
- DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind);
- minor_key_ = ElementsKindBits::encode(elements_kind) |
- CommonStoreModeBits::encode(mode);
+ StoreSlowElementStub(Isolate* isolate, KeyedAccessStoreMode mode)
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = CommonStoreModeBits::encode(mode);
}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
private:
- ElementsKind elements_kind() const {
- return ElementsKindBits::decode(minor_key_);
- }
-
- class ElementsKindBits
- : public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
-
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_PLATFORM_CODE_STUB(StoreElement, PlatformCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(StoreSlowElement, TurboFanCodeStub);
};
class ToBooleanICStub : public HydrogenCodeStub {
@@ -2045,7 +1731,7 @@ class ToBooleanICStub : public HydrogenCodeStub {
ToBooleanICStub(Isolate* isolate, InitializationState init_state)
: HydrogenCodeStub(isolate, init_state) {}
- static const int kNumHints = 9;
+ static const int kNumHints = 8;
STATIC_ASSERT(static_cast<int>(ToBooleanHint::kAny) ==
((1 << kNumHints) - 1));
class HintsBits : public BitField<uint16_t, 0, kNumHints> {};
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 54350698af..11837e97ba 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -13,8 +13,10 @@
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/compilation-info.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/eh-frame.h"
+#include "src/objects-inl.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -132,8 +134,6 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
info->prologue_offset(), info->is_debug() && !is_crankshafted);
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
- isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
- code->instruction_size());
return code;
}
@@ -239,7 +239,8 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
? FLAG_print_builtin_code
: (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
(info->IsOptimizing() && FLAG_print_opt_code &&
- info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)));
+ info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)) ||
+ (info->IsWasm() && FLAG_print_wasm_code));
if (print_code) {
std::unique_ptr<char[]> debug_name = info->GetDebugName();
CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index af9fbb5734..8b2e51e76a 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -19,12 +19,11 @@ static const int kRegExpGenerations = 2;
// Initial size of each compilation cache table allocated.
static const int kInitialCacheSize = 64;
-
CompilationCache::CompilationCache(Isolate* isolate)
: isolate_(isolate),
- script_(isolate, 1),
- eval_global_(isolate, 1),
- eval_contextual_(isolate, 1),
+ script_(isolate),
+ eval_global_(isolate),
+ eval_contextual_(isolate),
reg_exp_(isolate, kRegExpGenerations),
enabled_(true) {
CompilationSubCache* subcaches[kSubCacheCount] =
@@ -103,11 +102,8 @@ void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
}
}
-
-CompilationCacheScript::CompilationCacheScript(Isolate* isolate,
- int generations)
- : CompilationSubCache(isolate, generations) {}
-
+CompilationCacheScript::CompilationCacheScript(Isolate* isolate)
+ : CompilationSubCache(isolate, 1) {}
// We only re-use a cached function for some script source code if the
// script originates from the same place. This is to avoid issues
@@ -141,29 +137,31 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
// won't.
-Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
+InfoVectorPair CompilationCacheScript::Lookup(
Handle<String> source, Handle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Context> context, LanguageMode language_mode) {
- Object* result = NULL;
- int generation;
+ InfoVectorPair result;
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
{ HandleScope scope(isolate());
- for (generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- Handle<Object> probe = table->Lookup(source, context, language_mode);
- if (probe->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo> function_info =
- Handle<SharedFunctionInfo>::cast(probe);
- // Break when we've found a suitable shared function info that
- // matches the origin.
- if (HasOrigin(function_info, name, line_offset, column_offset,
- resource_options)) {
- result = *function_info;
- break;
- }
+ const int generation = 0;
+ DCHECK(generations() == 1);
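+ // The script cache is now single-generation: there is exactly one table to
+ // probe, and the old promotion between generations (removed further down)
+ // no longer applies.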
+ Handle<CompilationCacheTable> table = GetTable(generation);
+ InfoVectorPair probe = table->LookupScript(source, context, language_mode);
+ if (probe.has_shared()) {
+ Handle<SharedFunctionInfo> function_info(probe.shared(), isolate());
+ Handle<Cell> vector_handle;
+ if (probe.has_vector()) {
+ vector_handle = Handle<Cell>(probe.vector(), isolate());
+ }
+ // Accept the result only if the shared function info
+ // matches the origin.
+ if (HasOrigin(function_info, name, line_offset, column_offset,
+ resource_options)) {
+ result = InfoVectorPair(*function_info,
+ probe.has_vector() ? *vector_handle : nullptr);
}
}
}
@@ -171,72 +169,60 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
// Once outside the manacles of the handle scope, we need to recheck
// to see if we actually found a cached script. If so, we return a
// handle created in the caller's handle scope.
- if (result != NULL) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
- isolate());
+ if (result.has_shared()) {
+ Handle<SharedFunctionInfo> shared(result.shared(), isolate());
+ // TODO(mvstanton): Make sure HasOrigin can't allocate, or it will
+ // mess up our InfoVectorPair.
DCHECK(
HasOrigin(shared, name, line_offset, column_offset, resource_options));
- // If the script was found in a later generation, we promote it to
- // the first generation to let it survive longer in the cache.
- if (generation != 0) Put(source, context, language_mode, shared);
isolate()->counters()->compilation_cache_hits()->Increment();
- return shared;
} else {
isolate()->counters()->compilation_cache_misses()->Increment();
- return Handle<SharedFunctionInfo>::null();
}
+ return result;
}
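+
+// Usage sketch (hypothetical caller, using only the accessors exercised
+// above):
+//   InfoVectorPair pair = cache.Lookup(source, name, 0, 0, resource_options,
+//                                      context, language_mode);
+//   if (pair.has_shared()) {
+//     Handle<SharedFunctionInfo> shared(pair.shared(), isolate);
+//     Handle<Cell> vector;
+//     if (pair.has_vector()) vector = Handle<Cell>(pair.vector(), isolate);
+//   }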
-
-void CompilationCacheScript::Put(Handle<String> source,
- Handle<Context> context,
+void CompilationCacheScript::Put(Handle<String> source, Handle<Context> context,
LanguageMode language_mode,
- Handle<SharedFunctionInfo> function_info) {
+ Handle<SharedFunctionInfo> function_info,
+ Handle<Cell> literals) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
- SetFirstTable(CompilationCacheTable::Put(table, source, context,
- language_mode, function_info));
+ SetFirstTable(CompilationCacheTable::PutScript(
+ table, source, context, language_mode, function_info, literals));
}
-
-MaybeHandle<SharedFunctionInfo> CompilationCacheEval::Lookup(
+InfoVectorPair CompilationCacheEval::Lookup(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- LanguageMode language_mode, int scope_position) {
+ Handle<Context> native_context, LanguageMode language_mode, int position) {
HandleScope scope(isolate());
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
- Handle<Object> result = isolate()->factory()->undefined_value();
- int generation;
- for (generation = 0; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- result =
- table->LookupEval(source, outer_info, language_mode, scope_position);
- if (result->IsSharedFunctionInfo()) break;
- }
- if (result->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo> function_info =
- Handle<SharedFunctionInfo>::cast(result);
- if (generation != 0) {
- Put(source, outer_info, function_info, scope_position);
- }
+ InfoVectorPair result;
+ const int generation = 0;
+ DCHECK(generations() == 1);
+ Handle<CompilationCacheTable> table = GetTable(generation);
+ result = table->LookupEval(source, outer_info, native_context, language_mode,
+ position);
+ if (result.has_shared()) {
isolate()->counters()->compilation_cache_hits()->Increment();
- return scope.CloseAndEscape(function_info);
} else {
isolate()->counters()->compilation_cache_misses()->Increment();
- return MaybeHandle<SharedFunctionInfo>();
}
+ return result;
}
-
void CompilationCacheEval::Put(Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
Handle<SharedFunctionInfo> function_info,
- int scope_position) {
+ Handle<Context> native_context,
+ Handle<Cell> literals, int position) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
- table = CompilationCacheTable::PutEval(table, source, outer_info,
- function_info, scope_position);
+ table =
+ CompilationCacheTable::PutEval(table, source, outer_info, function_info,
+ native_context, literals, position);
SetFirstTable(table);
}
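+
+// Note on the signature changes above: eval cache entries now take the
+// native context and source position into account and carry a literals Cell
+// alongside the SharedFunctionInfo.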
@@ -286,32 +272,33 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
script_.Remove(function_info);
}
-
-MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
+InfoVectorPair CompilationCache::LookupScript(
Handle<String> source, Handle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Context> context, LanguageMode language_mode) {
- if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
+ InfoVectorPair empty_result;
+ if (!IsEnabled()) return empty_result;
return script_.Lookup(source, name, line_offset, column_offset,
resource_options, context, language_mode);
}
-
-MaybeHandle<SharedFunctionInfo> CompilationCache::LookupEval(
+InfoVectorPair CompilationCache::LookupEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, LanguageMode language_mode, int scope_position) {
- if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
+ Handle<Context> context, LanguageMode language_mode, int position) {
+ InfoVectorPair result;
+ if (!IsEnabled()) return result;
- MaybeHandle<SharedFunctionInfo> result;
if (context->IsNativeContext()) {
- result =
- eval_global_.Lookup(source, outer_info, language_mode, scope_position);
+ result = eval_global_.Lookup(source, outer_info, context, language_mode,
+ position);
} else {
- DCHECK(scope_position != kNoSourcePosition);
- result = eval_contextual_.Lookup(source, outer_info, language_mode,
- scope_position);
+ DCHECK(position != kNoSourcePosition);
+ Handle<Context> native_context(context->native_context(), isolate());
+ result = eval_contextual_.Lookup(source, outer_info, native_context,
+ language_mode, position);
}
+
return result;
}
@@ -323,30 +310,31 @@ MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
return reg_exp_.Lookup(source, flags);
}
-
-void CompilationCache::PutScript(Handle<String> source,
- Handle<Context> context,
+void CompilationCache::PutScript(Handle<String> source, Handle<Context> context,
LanguageMode language_mode,
- Handle<SharedFunctionInfo> function_info) {
+ Handle<SharedFunctionInfo> function_info,
+ Handle<Cell> literals) {
if (!IsEnabled()) return;
- script_.Put(source, context, language_mode, function_info);
+ script_.Put(source, context, language_mode, function_info, literals);
}
-
void CompilationCache::PutEval(Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info,
- int scope_position) {
+ Handle<Cell> literals, int position) {
if (!IsEnabled()) return;
HandleScope scope(isolate());
if (context->IsNativeContext()) {
- eval_global_.Put(source, outer_info, function_info, scope_position);
+ eval_global_.Put(source, outer_info, function_info, context, literals,
+ position);
} else {
- DCHECK(scope_position != kNoSourcePosition);
- eval_contextual_.Put(source, outer_info, function_info, scope_position);
+ DCHECK(position != kNoSourcePosition);
+ Handle<Context> native_context(context->native_context(), isolate());
+ eval_contextual_.Put(source, outer_info, function_info, native_context,
+ literals, position);
}
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 973673c524..229fe07d0d 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -6,12 +6,14 @@
#define V8_COMPILATION_CACHE_H_
#include "src/allocation.h"
-#include "src/handles.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
+template <typename T>
+class Handle;
+
// The compilation cache consists of several generational sub-caches which use
// this class as a base class. A sub-cache contains a compilation cache table
// for each generation of the sub-cache. Since the same source code string has
@@ -74,17 +76,16 @@ class CompilationSubCache {
// Sub-cache for scripts.
class CompilationCacheScript : public CompilationSubCache {
public:
- CompilationCacheScript(Isolate* isolate, int generations);
+ explicit CompilationCacheScript(Isolate* isolate);
- Handle<SharedFunctionInfo> Lookup(Handle<String> source, Handle<Object> name,
- int line_offset, int column_offset,
- ScriptOriginOptions resource_options,
- Handle<Context> context,
- LanguageMode language_mode);
- void Put(Handle<String> source,
- Handle<Context> context,
- LanguageMode language_mode,
- Handle<SharedFunctionInfo> function_info);
+ InfoVectorPair Lookup(Handle<String> source, Handle<Object> name,
+ int line_offset, int column_offset,
+ ScriptOriginOptions resource_options,
+ Handle<Context> context, LanguageMode language_mode);
+
+ void Put(Handle<String> source, Handle<Context> context,
+ LanguageMode language_mode, Handle<SharedFunctionInfo> function_info,
+ Handle<Cell> literals);
private:
bool HasOrigin(Handle<SharedFunctionInfo> function_info, Handle<Object> name,
@@ -109,16 +110,17 @@ class CompilationCacheScript : public CompilationSubCache {
// 4. The start position of the calling scope.
class CompilationCacheEval: public CompilationSubCache {
public:
- CompilationCacheEval(Isolate* isolate, int generations)
- : CompilationSubCache(isolate, generations) { }
+ explicit CompilationCacheEval(Isolate* isolate)
+ : CompilationSubCache(isolate, 1) {}
- MaybeHandle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<SharedFunctionInfo> outer_info,
- LanguageMode language_mode,
- int scope_position);
+ InfoVectorPair Lookup(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> native_context,
+ LanguageMode language_mode, int position);
void Put(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<SharedFunctionInfo> function_info, int scope_position);
+ Handle<SharedFunctionInfo> function_info,
+ Handle<Context> native_context, Handle<Cell> literals, int position);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
@@ -140,7 +142,6 @@ class CompilationCacheRegExp: public CompilationSubCache {
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
-
// The compilation cache keeps shared function infos for compiled
// scripts and evals. The shared function infos are looked up using
// the source string as the key. For regular expressions the
@@ -150,17 +151,19 @@ class CompilationCache {
// Finds the script shared function info for a source
// string. Returns an empty pair if the cache doesn't contain a
// script for the given source string with the right origin.
- MaybeHandle<SharedFunctionInfo> LookupScript(
- Handle<String> source, Handle<Object> name, int line_offset,
- int column_offset, ScriptOriginOptions resource_options,
- Handle<Context> context, LanguageMode language_mode);
+ InfoVectorPair LookupScript(Handle<String> source, Handle<Object> name,
+ int line_offset, int column_offset,
+ ScriptOriginOptions resource_options,
+ Handle<Context> context,
+ LanguageMode language_mode);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty pair if the cache doesn't
// contain a script for the given source string.
- MaybeHandle<SharedFunctionInfo> LookupEval(
- Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, LanguageMode language_mode, int scope_position);
+ InfoVectorPair LookupEval(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context, LanguageMode language_mode,
+ int position);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
@@ -169,16 +172,17 @@ class CompilationCache {
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
- void PutScript(Handle<String> source,
- Handle<Context> context,
+ void PutScript(Handle<String> source, Handle<Context> context,
LanguageMode language_mode,
- Handle<SharedFunctionInfo> function_info);
+ Handle<SharedFunctionInfo> function_info,
+ Handle<Cell> literals);
// Associate the (source, context->closure()->shared(), kind) triple
// with the shared function info. This may overwrite an existing mapping.
void PutEval(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
- Handle<SharedFunctionInfo> function_info, int scope_position);
+ Handle<SharedFunctionInfo> function_info, Handle<Cell> literals,
+ int position);
// Associate the (source, flags) pair to the given regexp data.
// This may overwrite an existing mapping.
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index 0a9ce310a7..b0dda6c7cd 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -9,6 +9,7 @@
#include "src/ast/scopes.h"
#include "src/debug/debug.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/source-position.h"
@@ -52,10 +53,10 @@ bool CompilationInfo::has_shared_info() const {
return parse_info_ && !parse_info_->shared_info().is_null();
}
-CompilationInfo::CompilationInfo(ParseInfo* parse_info,
+CompilationInfo::CompilationInfo(Zone* zone, ParseInfo* parse_info,
Handle<JSFunction> closure)
: CompilationInfo(parse_info, {}, Code::ComputeFlags(Code::FUNCTION), BASE,
- parse_info->isolate(), parse_info->zone()) {
+ parse_info->isolate(), zone) {
closure_ = closure;
// Compiling for the snapshot typically results in different code than
@@ -107,7 +108,6 @@ CompilationInfo::~CompilationInfo() {
shared_info()->DisableOptimization(bailout_reason());
}
dependencies()->Rollback();
- delete deferred_handles_;
}
int CompilationInfo::num_parameters() const {
@@ -131,8 +131,21 @@ bool CompilationInfo::ShouldSelfOptimize() {
!shared_info()->optimization_disabled();
}
+void CompilationInfo::set_deferred_handles(
+ std::shared_ptr<DeferredHandles> deferred_handles) {
+ DCHECK(deferred_handles_.get() == nullptr);
+ deferred_handles_.swap(deferred_handles);
+}
+
+void CompilationInfo::set_deferred_handles(DeferredHandles* deferred_handles) {
+ DCHECK(deferred_handles_.get() == nullptr);
+ deferred_handles_.reset(deferred_handles);
+}
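+
+// The shared_ptr overload lets ownership of deferred handles be shared
+// between the code that produced them and a background compilation job (see
+// the CompilerDispatcherJob constructor that accepts parse and compile
+// handle sets); the raw-pointer overload keeps the old single-owner setup.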
+
void CompilationInfo::ReopenHandlesInNewHandleScope() {
- closure_ = Handle<JSFunction>(*closure_);
+ if (!closure_.is_null()) {
+ closure_ = Handle<JSFunction>(*closure_);
+ }
}
bool CompilationInfo::has_simple_parameters() {
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
index 863183b5cd..a3938d2985 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/compilation-info.h
@@ -9,6 +9,7 @@
#include "src/compilation-dependencies.h"
#include "src/frames.h"
+#include "src/globals.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/source-position-table.h"
@@ -28,7 +29,7 @@ class Zone;
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
-class CompilationInfo final {
+class V8_EXPORT_PRIVATE CompilationInfo final {
public:
// Various configuration flags for a compilation, as well as some properties
// of the compiled code produced by a compilation.
@@ -52,7 +53,8 @@ class CompilationInfo final {
kLoopPeelingEnabled = 1 << 16,
};
- CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
+ CompilationInfo(Zone* zone, ParseInfo* parse_info,
+ Handle<JSFunction> closure);
CompilationInfo(Vector<const char> debug_name, Isolate* isolate, Zone* zone,
Code::Flags code_flags);
~CompilationInfo();
@@ -208,6 +210,7 @@ class CompilationInfo final {
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsStub() const { return mode_ == STUB; }
+ bool IsWasm() const { return output_code_kind() == Code::WASM_FUNCTION; }
void SetOptimizing();
void SetOptimizingForOsr(BailoutId osr_ast_id, JavaScriptFrame* osr_frame) {
SetOptimizing();
@@ -230,9 +233,10 @@ class CompilationInfo final {
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
- void set_deferred_handles(DeferredHandles* deferred_handles) {
- DCHECK(deferred_handles_ == NULL);
- deferred_handles_ = deferred_handles;
+ void set_deferred_handles(std::shared_ptr<DeferredHandles> deferred_handles);
+ void set_deferred_handles(DeferredHandles* deferred_handles);
+ std::shared_ptr<DeferredHandles> deferred_handles() {
+ return deferred_handles_;
}
void ReopenHandlesInNewHandleScope();
@@ -362,7 +366,7 @@ class CompilationInfo final {
// CompilationInfo allocates.
Zone* zone_;
- DeferredHandles* deferred_handles_;
+ std::shared_ptr<DeferredHandles> deferred_handles_;
// Dependencies for this compilation, e.g. stable maps.
CompilationDependencies dependencies_;
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
index fdb975a5e4..56d166f578 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -17,7 +17,6 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/unicode-cache.h"
#include "src/utils.h"
-#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -66,20 +65,54 @@ CompilerDispatcherJob::CompilerDispatcherJob(Isolate* isolate,
CompilerDispatcherTracer* tracer,
Handle<SharedFunctionInfo> shared,
size_t max_stack_size)
- : isolate_(isolate),
+ : status_(CompileJobStatus::kInitial),
+ isolate_(isolate),
tracer_(tracer),
+ context_(Handle<Context>::cast(
+ isolate_->global_handles()->Create(isolate->context()))),
shared_(Handle<SharedFunctionInfo>::cast(
isolate_->global_handles()->Create(*shared))),
max_stack_size_(max_stack_size),
trace_compiler_dispatcher_jobs_(FLAG_trace_compiler_dispatcher_jobs) {
+ DCHECK(!shared_->is_toplevel());
HandleScope scope(isolate_);
- DCHECK(!shared_->outer_scope_info()->IsTheHole(isolate_));
Handle<Script> script(Script::cast(shared_->script()), isolate_);
Handle<String> source(String::cast(script->source()), isolate_);
if (trace_compiler_dispatcher_jobs_) {
PrintF("CompilerDispatcherJob[%p] created for ", static_cast<void*>(this));
shared_->ShortPrint();
- PrintF("\n");
+ PrintF(" in initial state.\n");
+ }
+}
+
+CompilerDispatcherJob::CompilerDispatcherJob(
+ Isolate* isolate, CompilerDispatcherTracer* tracer, Handle<Script> script,
+ Handle<SharedFunctionInfo> shared, FunctionLiteral* literal,
+ std::shared_ptr<Zone> parse_zone,
+ std::shared_ptr<DeferredHandles> parse_handles,
+ std::shared_ptr<DeferredHandles> compile_handles, size_t max_stack_size)
+ : status_(CompileJobStatus::kAnalyzed),
+ isolate_(isolate),
+ tracer_(tracer),
+ context_(Handle<Context>::cast(
+ isolate_->global_handles()->Create(isolate->context()))),
+ shared_(Handle<SharedFunctionInfo>::cast(
+ isolate_->global_handles()->Create(*shared))),
+ max_stack_size_(max_stack_size),
+ parse_info_(new ParseInfo(shared_)),
+ parse_zone_(parse_zone),
+ compile_info_(new CompilationInfo(parse_info_->zone(), parse_info_.get(),
+ Handle<JSFunction>::null())),
+ trace_compiler_dispatcher_jobs_(FLAG_trace_compiler_dispatcher_jobs) {
+ parse_info_->set_literal(literal);
+ parse_info_->set_script(script);
+ parse_info_->set_deferred_handles(parse_handles);
+ compile_info_->set_deferred_handles(compile_handles);
+
+ if (trace_compiler_dispatcher_jobs_) {
+ PrintF("CompilerDispatcherJob[%p] created for ", static_cast<void*>(this));
+ shared_->ShortPrint();
+ PrintF(" in Analyzed state.\n");
}
}
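+
+// This second constructor starts a job directly in the kAnalyzed state:
+// parsing and analysis already happened on the caller's side, so the job
+// adopts the given parse zone and deferred handles and only compilation
+// remains.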
@@ -88,6 +121,7 @@ CompilerDispatcherJob::~CompilerDispatcherJob() {
DCHECK(status_ == CompileJobStatus::kInitial ||
status_ == CompileJobStatus::kDone);
i::GlobalHandles::Destroy(Handle<Object>::cast(shared_).location());
+ i::GlobalHandles::Destroy(Handle<Object>::cast(context_).location());
}
bool CompilerDispatcherJob::IsAssociatedWith(
@@ -105,11 +139,11 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
}
HandleScope scope(isolate_);
unicode_cache_.reset(new UnicodeCache());
- zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
Handle<Script> script(Script::cast(shared_->script()), isolate_);
DCHECK(script->type() != Script::TYPE_NATIVE);
Handle<String> source(String::cast(script->source()), isolate_);
+ parse_info_.reset(new ParseInfo(isolate_->allocator()));
if (source->IsExternalTwoByteString() || source->IsExternalOneByteString()) {
character_stream_.reset(ScannerStream::For(
source, shared_->start_position(), shared_->end_position()));
@@ -140,7 +174,7 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
offset = shared_->start_position();
int byte_len = length * (source->IsOneByteRepresentation() ? 1 : 2);
- data = zone_->New(byte_len);
+ data = parse_info_->zone()->New(byte_len);
DisallowHeapAllocation no_allocation;
String::FlatContent content = source->GetFlatContent();
@@ -178,7 +212,6 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
ScannerStream::For(wrapper_, shared_->start_position() - offset,
shared_->end_position() - offset));
}
- parse_info_.reset(new ParseInfo(zone_.get()));
parse_info_->set_isolate(isolate_);
parse_info_->set_character_stream(character_stream_.get());
parse_info_->set_hash_seed(isolate_->heap()->HashSeed());
@@ -191,12 +224,12 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
parse_info_->set_function_literal_id(shared_->function_literal_id());
parser_.reset(new Parser(parse_info_.get()));
- Handle<ScopeInfo> outer_scope_info(
- handle(ScopeInfo::cast(shared_->outer_scope_info())));
- parser_->DeserializeScopeChain(parse_info_.get(),
- outer_scope_info->length() > 0
- ? MaybeHandle<ScopeInfo>(outer_scope_info)
- : MaybeHandle<ScopeInfo>());
+ MaybeHandle<ScopeInfo> outer_scope_info;
+ if (!shared_->outer_scope_info()->IsTheHole(isolate_) &&
+ ScopeInfo::cast(shared_->outer_scope_info())->length() > 0) {
+ outer_scope_info = handle(ScopeInfo::cast(shared_->outer_scope_info()));
+ }
+ parser_->DeserializeScopeChain(parse_info_.get(), outer_scope_info);
Handle<String> name(String::cast(shared_->name()));
parse_info_->set_function_name(
@@ -249,27 +282,30 @@ bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
wrapper_ = Handle<String>::null();
}
+ Handle<Script> script(Script::cast(shared_->script()), isolate_);
+ parse_info_->set_script(script);
if (parse_info_->literal() == nullptr) {
+ parser_->ReportErrors(isolate_, script);
status_ = CompileJobStatus::kFailed;
} else {
- status_ = CompileJobStatus::kReadyToAnalyse;
+ status_ = CompileJobStatus::kReadyToAnalyze;
}
+ parser_->UpdateStatistics(isolate_, script);
DeferredHandleScope scope(isolate_);
{
- Handle<Script> script(Script::cast(shared_->script()), isolate_);
+ parse_info_->ReopenHandlesInNewHandleScope();
- parse_info_->set_script(script);
- Handle<ScopeInfo> outer_scope_info(
- handle(ScopeInfo::cast(shared_->outer_scope_info())));
- if (outer_scope_info->length() > 0) {
+ if (!shared_->outer_scope_info()->IsTheHole(isolate_) &&
+ ScopeInfo::cast(shared_->outer_scope_info())->length() > 0) {
+ Handle<ScopeInfo> outer_scope_info(
+ handle(ScopeInfo::cast(shared_->outer_scope_info())));
parse_info_->set_outer_scope_info(outer_scope_info);
}
parse_info_->set_shared_info(shared_);
- // Do the parsing tasks which need to be done on the main thread. This
- // will also handle parse errors.
- parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
+ // Internalize ast values on the main thread.
+ parse_info_->ast_value_factory()->Internalize(isolate_);
parser_->HandleSourceURLComments(isolate_, script);
parse_info_->set_character_stream(nullptr);
@@ -278,30 +314,43 @@ bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
unicode_cache_.reset();
character_stream_.reset();
}
- handles_from_parsing_.reset(scope.Detach());
+ parse_info_->set_deferred_handles(scope.Detach());
return status_ != CompileJobStatus::kFailed;
}
-bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
+bool CompilerDispatcherJob::AnalyzeOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
- DCHECK(status() == CompileJobStatus::kReadyToAnalyse);
- COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToCompile);
+ DCHECK(status() == CompileJobStatus::kReadyToAnalyze);
+ COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kAnalyze);
if (trace_compiler_dispatcher_jobs_) {
- PrintF("CompilerDispatcherJob[%p]: Preparing to compile\n",
- static_cast<void*>(this));
+ PrintF("CompilerDispatcherJob[%p]: Analyzing\n", static_cast<void*>(this));
}
- compile_info_.reset(
- new CompilationInfo(parse_info_.get(), Handle<JSFunction>::null()));
+ compile_info_.reset(new CompilationInfo(
+ parse_info_->zone(), parse_info_.get(), Handle<JSFunction>::null()));
DeferredHandleScope scope(isolate_);
- if (Compiler::Analyze(parse_info_.get())) {
- compile_job_.reset(
- Compiler::PrepareUnoptimizedCompilationJob(compile_info_.get()));
+ {
+ if (Compiler::Analyze(parse_info_.get())) {
+ status_ = CompileJobStatus::kAnalyzed;
+ } else {
+ status_ = CompileJobStatus::kFailed;
+ if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
+ }
}
compile_info_->set_deferred_handles(scope.Detach());
+ return status_ != CompileJobStatus::kFailed;
+}
+
+bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
+ DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+ DCHECK(status() == CompileJobStatus::kAnalyzed);
+ COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToCompile);
+
+ compile_job_.reset(
+ Compiler::PrepareUnoptimizedCompilationJob(compile_info_.get()));
if (!compile_job_.get()) {
if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
status_ = CompileJobStatus::kFailed;
@@ -344,18 +393,20 @@ bool CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
static_cast<void*>(this));
}
- if (compile_job_->state() == CompilationJob::State::kFailed ||
- !Compiler::FinalizeCompilationJob(compile_job_.release())) {
- if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
- status_ = CompileJobStatus::kFailed;
- return false;
+ {
+ HandleScope scope(isolate_);
+ if (compile_job_->state() == CompilationJob::State::kFailed ||
+ !Compiler::FinalizeCompilationJob(compile_job_.release())) {
+ if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
+ status_ = CompileJobStatus::kFailed;
+ return false;
+ }
}
- zone_.reset();
- parse_info_.reset();
- compile_info_.reset();
compile_job_.reset();
- handles_from_parsing_.reset();
+ compile_info_.reset();
+ parse_zone_.reset();
+ parse_info_.reset();
status_ = CompileJobStatus::kDone;
return true;
@@ -368,14 +419,13 @@ void CompilerDispatcherJob::ResetOnMainThread() {
PrintF("CompilerDispatcherJob[%p]: Resetting\n", static_cast<void*>(this));
}
+ compile_job_.reset();
+ compile_info_.reset();
+ parse_zone_.reset();
parser_.reset();
unicode_cache_.reset();
character_stream_.reset();
parse_info_.reset();
- handles_from_parsing_.reset();
- compile_info_.reset();
- compile_job_.reset();
- zone_.reset();
if (!source_.is_null()) {
i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
@@ -401,7 +451,10 @@ double CompilerDispatcherJob::EstimateRuntimeOfNextStepInMs() const {
case CompileJobStatus::kParsed:
return tracer_->EstimateFinalizeParsingInMs();
- case CompileJobStatus::kReadyToAnalyse:
+ case CompileJobStatus::kReadyToAnalyze:
+ return tracer_->EstimateAnalyzeInMs();
+
+ case CompileJobStatus::kAnalyzed:
return tracer_->EstimatePrepareToCompileInMs();
case CompileJobStatus::kReadyToCompile:
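The hunks above split the old prepare step in two: AnalyzeOnMainThread takes a job from kReadyToAnalyze to kAnalyzed (or kFailed), and the slimmer PrepareToCompileOnMainThread takes it from kAnalyzed to kReadyToCompile. Both share one failure convention: the job lands in kFailed, an exception is guaranteed to be pending, and the method returns false. A minimal sketch of that convention, using illustrative stand-in types rather than V8's:

#include <cassert>

enum class Status { kReadyToAnalyze, kAnalyzed, kFailed };

struct Isolate {
  bool pending_exception = false;
  void StackOverflow() { pending_exception = true; }
};

struct Job {
  Isolate* isolate;
  Status status = Status::kReadyToAnalyze;

  bool AnalyzeOnMainThread(bool analysis_ok) {
    assert(status == Status::kReadyToAnalyze);
    if (analysis_ok) {
      status = Status::kAnalyzed;
    } else {
      status = Status::kFailed;
      // Failing steps must leave an exception pending for the caller.
      if (!isolate->pending_exception) isolate->StackOverflow();
    }
    return status != Status::kFailed;
  }
};

int main() {
  Isolate isolate;
  Job job{&isolate};
  assert(job.AnalyzeOnMainThread(true) && job.status == Status::kAnalyzed);
}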
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
index e0a2677f8e..aea484729e 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -16,9 +16,12 @@
namespace v8 {
namespace internal {
+class AstValueFactory;
class CompilerDispatcherTracer;
class CompilationInfo;
class CompilationJob;
+class DeferredHandles;
+class FunctionLiteral;
class Isolate;
class ParseInfo;
class Parser;
@@ -26,13 +29,13 @@ class SharedFunctionInfo;
class String;
class UnicodeCache;
class Utf16CharacterStream;
-class Zone;
enum class CompileJobStatus {
kInitial,
kReadyToParse,
kParsed,
- kReadyToAnalyse,
+ kReadyToAnalyze,
+ kAnalyzed,
kReadyToCompile,
kCompiled,
kFailed,
@@ -41,13 +44,25 @@ enum class CompileJobStatus {
class V8_EXPORT_PRIVATE CompilerDispatcherJob {
public:
+ // Creates a CompilerDispatcherJob in the initial state.
CompilerDispatcherJob(Isolate* isolate, CompilerDispatcherTracer* tracer,
Handle<SharedFunctionInfo> shared,
size_t max_stack_size);
+ // Creates a CompilerDispatcherJob in the analyzed state.
+ CompilerDispatcherJob(Isolate* isolate, CompilerDispatcherTracer* tracer,
+ Handle<Script> script,
+ Handle<SharedFunctionInfo> shared,
+ FunctionLiteral* literal,
+ std::shared_ptr<Zone> parse_zone,
+ std::shared_ptr<DeferredHandles> parse_handles,
+ std::shared_ptr<DeferredHandles> compile_handles,
+ size_t max_stack_size);
~CompilerDispatcherJob();
CompileJobStatus status() const { return status_; }
+ Context* context() { return *context_; }
+
// Returns true if this CompilerDispatcherJob was created for the given
// function.
bool IsAssociatedWith(Handle<SharedFunctionInfo> shared) const;
@@ -58,11 +73,15 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
// Transition from kReadyToParse to kParsed.
void Parse();
- // Transition from kParsed to kReadyToAnalyse (or kFailed). Returns false
+ // Transition from kParsed to kReadyToAnalyze (or kFailed). Returns false
// when transitioning to kFailed. In that case, an exception is pending.
bool FinalizeParsingOnMainThread();
- // Transition from kReadyToAnalyse to kReadyToCompile (or kFailed). Returns
+ // Transition from kReadyToAnalyze to kAnalyzed (or kFailed). Returns
+ // false when transitioning to kFailed. In that case, an exception is pending.
+ bool AnalyzeOnMainThread();
+
+ // Transition from kAnalyzed to kReadyToCompile (or kFailed). Returns
// false when transitioning to kFailed. In that case, an exception is pending.
bool PrepareToCompileOnMainThread();
@@ -86,9 +105,10 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
private:
FRIEND_TEST(CompilerDispatcherJobTest, ScopeChain);
- CompileJobStatus status_ = CompileJobStatus::kInitial;
+ CompileJobStatus status_;
Isolate* isolate_;
CompilerDispatcherTracer* tracer_;
+ Handle<Context> context_; // Global handle.
Handle<SharedFunctionInfo> shared_; // Global handle.
Handle<String> source_; // Global handle.
Handle<String> wrapper_; // Global handle.
@@ -97,11 +117,12 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
// Members required for parsing.
std::unique_ptr<UnicodeCache> unicode_cache_;
- std::unique_ptr<Zone> zone_;
std::unique_ptr<Utf16CharacterStream> character_stream_;
std::unique_ptr<ParseInfo> parse_info_;
std::unique_ptr<Parser> parser_;
- std::unique_ptr<DeferredHandles> handles_from_parsing_;
+
+ // Members required for compiling a parsed function.
+ std::shared_ptr<Zone> parse_zone_;
// Members required for compiling.
std::unique_ptr<CompilationInfo> compile_info_;
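Note the ownership split the header now encodes: per-job parse state stays in std::unique_ptr, while the parse zone becomes std::shared_ptr<Zone>, because one parse arena can back the outer compilation and several inner-function jobs at once. A small sketch of that lifetime model (Zone here is a stand-in for the AST arena, not V8's class):

#include <cassert>
#include <memory>
#include <vector>

struct Zone {};  // stand-in for the arena that owns the parsed AST

struct Job {
  std::shared_ptr<Zone> parse_zone;  // shared: keeps the AST alive
};

int main() {
  auto zone = std::make_shared<Zone>();
  std::vector<Job> jobs = {{zone}, {zone}};  // two inner-function jobs
  zone.reset();                // the outer compilation lets go first...
  assert(jobs[0].parse_zone);  // ...but the arena survives for the jobs
  jobs.clear();                // freed only when the last job releases it
}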
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
index 0703e016e9..d98209b147 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
@@ -39,6 +39,9 @@ CompilerDispatcherTracer::Scope::~Scope() {
case ScopeID::kFinalizeParsing:
tracer_->RecordFinalizeParsing(elapsed);
break;
+ case ScopeID::kAnalyze:
+ tracer_->RecordAnalyze(elapsed);
+ break;
case ScopeID::kPrepareToCompile:
tracer_->RecordPrepareToCompile(elapsed);
break;
@@ -60,6 +63,8 @@ const char* CompilerDispatcherTracer::Scope::Name(ScopeID scope_id) {
return "V8.BackgroundCompile_Parse";
case ScopeID::kFinalizeParsing:
return "V8.BackgroundCompile_FinalizeParsing";
+ case ScopeID::kAnalyze:
+ return "V8.BackgroundCompile_Analyze";
case ScopeID::kPrepareToCompile:
return "V8.BackgroundCompile_PrepareToCompile";
case ScopeID::kCompile:
@@ -97,6 +102,11 @@ void CompilerDispatcherTracer::RecordFinalizeParsing(double duration_ms) {
finalize_parsing_events_.Push(duration_ms);
}
+void CompilerDispatcherTracer::RecordAnalyze(double duration_ms) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ analyze_events_.Push(duration_ms);
+}
+
void CompilerDispatcherTracer::RecordPrepareToCompile(double duration_ms) {
base::LockGuard<base::Mutex> lock(&mutex_);
prepare_compile_events_.Push(duration_ms);
@@ -128,6 +138,11 @@ double CompilerDispatcherTracer::EstimateFinalizeParsingInMs() const {
return Average(finalize_parsing_events_);
}
+double CompilerDispatcherTracer::EstimateAnalyzeInMs() const {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ return Average(analyze_events_);
+}
+
double CompilerDispatcherTracer::EstimatePrepareToCompileInMs() const {
base::LockGuard<base::Mutex> lock(&mutex_);
return Average(prepare_compile_events_);
@@ -148,11 +163,12 @@ void CompilerDispatcherTracer::DumpStatistics() const {
PrintF(
"CompilerDispatcherTracer: "
"prepare_parsing=%.2lfms parsing=%.2lfms/kb finalize_parsing=%.2lfms "
- "prepare_compiling=%.2lfms compiling=%.2lfms/kb "
- "finalize_compilig=%.2lfms\n",
+ "analyze=%.2lfms prepare_compiling=%.2lfms compiling=%.2lfms/kb "
+ "finalize_compiling=%.2lfms\n",
EstimatePrepareToParseInMs(), EstimateParseInMs(1 * KB),
- EstimateFinalizeParsingInMs(), EstimatePrepareToCompileInMs(),
- EstimateCompileInMs(1 * KB), EstimateFinalizeCompilingInMs());
+ EstimateFinalizeParsingInMs(), EstimateAnalyzeInMs(),
+ EstimatePrepareToCompileInMs(), EstimateCompileInMs(1 * KB),
+ EstimateFinalizeCompilingInMs());
}
double CompilerDispatcherTracer::Average(
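CompilerDispatcherTracer::Scope is an RAII timer: its destructor measures the elapsed time and files it under the matching bucket, which is why supporting the new phase only needs the kAnalyze case above. The same pattern reduced to a standalone sketch, with std::chrono in place of V8's timers:

#include <chrono>
#include <cstdio>

// Records elapsed milliseconds on destruction, like Scope::~Scope above.
class ScopedTimer {
 public:
  explicit ScopedTimer(double* sink)
      : sink_(sink), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    std::chrono::duration<double, std::milli> elapsed =
        std::chrono::steady_clock::now() - start_;
    *sink_ += elapsed.count();
  }

 private:
  double* sink_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  double analyze_ms = 0;
  {
    ScopedTimer timer(&analyze_ms);  // plays the role of RecordAnalyze()
    // ... the work being measured ...
  }
  std::printf("analyze=%.2lfms\n", analyze_ms);
}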
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
index 3751d0da54..7bbd5d9d60 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
@@ -35,6 +35,7 @@ class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
kPrepareToParse,
kParse,
kFinalizeParsing,
+ kAnalyze,
kPrepareToCompile,
kCompile,
kFinalizeCompiling
@@ -62,6 +63,7 @@ class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
void RecordPrepareToParse(double duration_ms);
void RecordParse(double duration_ms, size_t source_length);
void RecordFinalizeParsing(double duration_ms);
+ void RecordAnalyze(double duration_ms);
void RecordPrepareToCompile(double duration_ms);
void RecordCompile(double duration_ms, size_t ast_size_in_bytes);
void RecordFinalizeCompiling(double duration_ms);
@@ -69,6 +71,7 @@ class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
double EstimatePrepareToParseInMs() const;
double EstimateParseInMs(size_t source_length) const;
double EstimateFinalizeParsingInMs() const;
+ double EstimateAnalyzeInMs() const;
double EstimatePrepareToCompileInMs() const;
double EstimateCompileInMs(size_t ast_size_in_bytes) const;
double EstimateFinalizeCompilingInMs() const;
@@ -84,6 +87,7 @@ class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
base::RingBuffer<double> prepare_parse_events_;
base::RingBuffer<std::pair<size_t, double>> parse_events_;
base::RingBuffer<double> finalize_parsing_events_;
+ base::RingBuffer<double> analyze_events_;
base::RingBuffer<double> prepare_compile_events_;
base::RingBuffer<std::pair<size_t, double>> compile_events_;
base::RingBuffer<double> finalize_compiling_events_;
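Each Record* call pushes one duration into a mutex-guarded ring buffer, and the matching Estimate*InMs returns that buffer's average; analyze_events_ above simply adds one more bucket. A compact sketch of the record/estimate pair (capacity and locking simplified, names made up):

#include <cstddef>
#include <deque>
#include <mutex>
#include <numeric>

class EventAverage {
 public:
  void Record(double ms) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (samples_.size() == kCapacity) samples_.pop_front();
    samples_.push_back(ms);
  }
  double EstimateMs() const {
    std::lock_guard<std::mutex> lock(mutex_);
    if (samples_.empty()) return 0.0;
    return std::accumulate(samples_.begin(), samples_.end(), 0.0) /
           samples_.size();
  }

 private:
  static constexpr std::size_t kCapacity = 16;  // illustrative size
  mutable std::mutex mutex_;
  std::deque<double> samples_;
};

int main() {
  EventAverage analyze_events;
  analyze_events.Record(1.0);
  analyze_events.Record(3.0);
  return analyze_events.EstimateMs() == 2.0 ? 0 : 1;
}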
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index 70edce9673..802142b883 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -8,6 +8,7 @@
#include "include/v8.h"
#include "src/base/platform/time.h"
#include "src/cancelable-task.h"
+#include "src/compilation-info.h"
#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/flags.h"
@@ -23,6 +24,13 @@ enum class ExceptionHandling { kSwallow, kThrow };
bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
ExceptionHandling exception_handling) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherForgroundStep");
+
+ // Ensure we are in the correct context for the job.
+ SaveContext save(isolate);
+ isolate->set_context(job->context());
+
switch (job->status()) {
case CompileJobStatus::kInitial:
job->PrepareToParseOnMainThread();
@@ -36,7 +44,11 @@ bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
job->FinalizeParsingOnMainThread();
break;
- case CompileJobStatus::kReadyToAnalyse:
+ case CompileJobStatus::kReadyToAnalyze:
+ job->AnalyzeOnMainThread();
+ break;
+
+ case CompileJobStatus::kAnalyzed:
job->PrepareToCompileOnMainThread();
break;
@@ -74,6 +86,9 @@ bool CanRunOnAnyThread(CompilerDispatcherJob* job) {
void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
DCHECK(CanRunOnAnyThread(job));
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherBackgroundStep");
+
switch (job->status()) {
case CompileJobStatus::kReadyToParse:
job->Parse();
@@ -224,7 +239,7 @@ CompilerDispatcher::~CompilerDispatcher() {
task_manager_->CancelAndWait();
}
-bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
+bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
if (!IsEnabled()) return false;
DCHECK(FLAG_ignition);
@@ -245,12 +260,19 @@ bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
return false;
}
+ return true;
+}
+
+bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherEnqueue");
+ if (!CanEnqueue(function)) return false;
if (IsEnqueued(function)) return true;
if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: enqueuing ");
function->ShortPrint();
- PrintF("\n");
+ PrintF(" for parse and compile\n");
}
std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
@@ -263,6 +285,9 @@ bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
}
bool CompilerDispatcher::EnqueueAndStep(Handle<SharedFunctionInfo> function) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherEnqueueAndStep");
+ if (IsEnqueued(function)) return true;
if (!Enqueue(function)) return false;
if (trace_compiler_dispatcher_) {
@@ -277,17 +302,71 @@ bool CompilerDispatcher::EnqueueAndStep(Handle<SharedFunctionInfo> function) {
return true;
}
-bool CompilerDispatcher::IsEnabled() const {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- return FLAG_compiler_dispatcher && platform_->IdleTasksEnabled(v8_isolate);
+bool CompilerDispatcher::Enqueue(
+ Handle<Script> script, Handle<SharedFunctionInfo> function,
+ FunctionLiteral* literal, std::shared_ptr<Zone> parse_zone,
+ std::shared_ptr<DeferredHandles> parse_handles,
+ std::shared_ptr<DeferredHandles> compile_handles) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherEnqueue");
+ if (!CanEnqueue(function)) return false;
+ if (IsEnqueued(function)) return true;
+
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: enqueuing ");
+ function->ShortPrint();
+ PrintF(" for compile\n");
+ }
+
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ isolate_, tracer_.get(), script, function, literal, parse_zone,
+ parse_handles, compile_handles, max_stack_size_));
+ std::pair<int, int> key(Script::cast(function->script())->id(),
+ function->function_literal_id());
+ jobs_.insert(std::make_pair(key, std::move(job)));
+ ScheduleIdleTaskIfNeeded();
+ return true;
+}
+
+bool CompilerDispatcher::EnqueueAndStep(
+ Handle<Script> script, Handle<SharedFunctionInfo> function,
+ FunctionLiteral* literal, std::shared_ptr<Zone> parse_zone,
+ std::shared_ptr<DeferredHandles> parse_handles,
+ std::shared_ptr<DeferredHandles> compile_handles) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherEnqueueAndStep");
+ if (IsEnqueued(function)) return true;
+ if (!Enqueue(script, function, literal, parse_zone, parse_handles,
+ compile_handles)) {
+ return false;
+ }
+
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: stepping ");
+ function->ShortPrint();
+ PrintF("\n");
+ }
+ JobMap::const_iterator job = GetJobFor(function);
+ DoNextStepOnMainThread(isolate_, job->second.get(),
+ ExceptionHandling::kSwallow);
+ ConsiderJobForBackgroundProcessing(job->second.get());
+ return true;
}
+bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; }
+
bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
+ if (jobs_.empty()) return false;
return GetJobFor(function) != jobs_.end();
}
void CompilerDispatcher::WaitForJobIfRunningOnBackground(
CompilerDispatcherJob* job) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherWaitForBackgroundJob");
+ RuntimeCallTimerScope runtimeTimer(
+ isolate_, &RuntimeCallStats::CompileWaitForDispatcher);
+
base::LockGuard<base::Mutex> lock(&mutex_);
if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
pending_background_jobs_.erase(job);
@@ -303,6 +382,8 @@ void CompilerDispatcher::WaitForJobIfRunningOnBackground(
}
bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherFinishNow");
JobMap::const_iterator job = GetJobFor(function);
CHECK(job != jobs_.end());
@@ -479,6 +560,8 @@ void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
}
void CompilerDispatcher::ScheduleMoreBackgroundTasksIfNeeded() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherScheduleMoreBackgroundTasksIfNeeded");
if (FLAG_single_threaded) return;
{
base::LockGuard<base::Mutex> lock(&mutex_);
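One detail worth calling out from DoNextStepOnMainThread above: before stepping, the dispatcher saves the current context and switches into the job's own context, so anything a step allocates or throws lands in the right native context. The save/restore shape as a standalone sketch (SaveContext reduced to a toy scope object):

#include <cassert>

struct Context { int id; };

struct IsolateLike {
  Context* current = nullptr;
};

// RAII save/restore, mirroring SaveContext plus Isolate::set_context above.
class ContextScope {
 public:
  ContextScope(IsolateLike* isolate, Context* job_context)
      : isolate_(isolate), saved_(isolate->current) {
    isolate_->current = job_context;
  }
  ~ContextScope() { isolate_->current = saved_; }

 private:
  IsolateLike* isolate_;
  Context* saved_;
};

int main() {
  IsolateLike isolate;
  Context outer{1}, job{2};
  isolate.current = &outer;
  {
    ContextScope scope(&isolate, &job);
    assert(isolate.current->id == 2);  // job's context while stepping
  }
  assert(isolate.current->id == 1);  // restored afterwards
}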
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
index 41d4c83d52..6347aa89d3 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
@@ -28,8 +28,11 @@ namespace internal {
class CancelableTaskManager;
class CompilerDispatcherJob;
class CompilerDispatcherTracer;
+class DeferredHandles;
+class FunctionLiteral;
class Isolate;
class SharedFunctionInfo;
+class Zone;
template <typename T>
class Handle;
@@ -68,7 +71,10 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
size_t max_stack_size);
~CompilerDispatcher();
- // Returns true if a job was enqueued.
+ // Returns true if the compiler dispatcher is enabled.
+ bool IsEnabled() const;
+
+ // Enqueue a job for parse and compile. Returns true if a job was enqueued.
bool Enqueue(Handle<SharedFunctionInfo> function);
// Like Enqueue, but also advances the job so that it can potentially
@@ -76,11 +82,28 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// true if the job was enqueued.
bool EnqueueAndStep(Handle<SharedFunctionInfo> function);
+ // Enqueue a job for compilation. The function must already have been parsed
+ // and analyzed, and be ready for compilation. Returns true if a job was
+ // enqueued.
+ bool Enqueue(Handle<Script> script, Handle<SharedFunctionInfo> function,
+ FunctionLiteral* literal, std::shared_ptr<Zone> parse_zone,
+ std::shared_ptr<DeferredHandles> parse_handles,
+ std::shared_ptr<DeferredHandles> compile_handles);
+
+ // Like Enqueue, but also advances the job so that it can potentially
+ // continue running on a background thread (if at all possible). Returns
+ // true if the job was enqueued.
+ bool EnqueueAndStep(Handle<Script> script,
+ Handle<SharedFunctionInfo> function,
+ FunctionLiteral* literal,
+ std::shared_ptr<Zone> parse_zone,
+ std::shared_ptr<DeferredHandles> parse_handles,
+ std::shared_ptr<DeferredHandles> compile_handles);
+
// Returns true if there is a pending job for the given function.
bool IsEnqueued(Handle<SharedFunctionInfo> function) const;
// Blocks until the given function is compiled (and does so as fast as
- // possible). Returns true if the compile job was succesful.
+ // possible). Returns true if the compile job was successful.
bool FinishNow(Handle<SharedFunctionInfo> function);
// Aborts a given job. Blocks if requested.
@@ -95,6 +118,9 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
private:
FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStep);
+ FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStepTwice);
+ FRIEND_TEST(CompilerDispatcherTest, EnqueueParsed);
+ FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStepParsed);
FRIEND_TEST(CompilerDispatcherTest, IdleTaskSmallIdleTime);
FRIEND_TEST(CompilerDispatcherTest, CompileOnBackgroundThread);
FRIEND_TEST(CompilerDispatcherTest, FinishNowWithBackgroundTask);
@@ -110,8 +136,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
class IdleTask;
void WaitForJobIfRunningOnBackground(CompilerDispatcherJob* job);
- bool IsEnabled() const;
void AbortInactiveJobs();
+ bool CanEnqueue(Handle<SharedFunctionInfo> function);
JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
void ConsiderJobForBackgroundProcessing(CompilerDispatcherJob* job);
void ScheduleMoreBackgroundTasksIfNeeded();
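Both EnqueueAndStep overloads declared above follow the same guard order: an already-enqueued function counts as success, enqueueing itself can be refused, and only a freshly enqueued job is stepped once on the main thread. A toy dispatcher showing just that control flow (a std::set of names stands in for the job map):

#include <cassert>
#include <set>
#include <string>

struct ToyDispatcher {
  std::set<std::string> jobs;

  bool IsEnqueued(const std::string& f) const { return jobs.count(f) > 0; }
  bool Enqueue(const std::string& f) { return jobs.insert(f).second; }

  bool EnqueueAndStep(const std::string& f) {
    if (IsEnqueued(f)) return true;  // already queued: nothing else to do
    if (!Enqueue(f)) return false;   // dispatcher disabled or ineligible
    // ...DoNextStepOnMainThread + ConsiderJobForBackgroundProcessing...
    return true;
  }
};

int main() {
  ToyDispatcher d;
  assert(d.EnqueueAndStep("f"));  // enqueues and steps once
  assert(d.EnqueueAndStep("f"));  // idempotent on the second call
}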
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 1169506384..04df928727 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -9,6 +9,7 @@
#include "src/compiler.h"
#include "src/full-codegen/full-codegen.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
@@ -33,11 +34,11 @@ void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
class OptimizingCompileDispatcher::CompileTask : public v8::Task {
public:
- explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
- OptimizingCompileDispatcher* dispatcher =
- isolate_->optimizing_compile_dispatcher();
- base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
- ++dispatcher->ref_count_;
+ explicit CompileTask(Isolate* isolate,
+ OptimizingCompileDispatcher* dispatcher)
+ : isolate_(isolate), dispatcher_(dispatcher) {
+ base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
+ ++dispatcher_->ref_count_;
}
virtual ~CompileTask() {}
@@ -49,30 +50,29 @@ class OptimizingCompileDispatcher::CompileTask : public v8::Task {
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
- OptimizingCompileDispatcher* dispatcher =
- isolate_->optimizing_compile_dispatcher();
{
TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileConcurrent");
- if (dispatcher->recompilation_delay_ != 0) {
+ if (dispatcher_->recompilation_delay_ != 0) {
base::OS::Sleep(base::TimeDelta::FromMilliseconds(
- dispatcher->recompilation_delay_));
+ dispatcher_->recompilation_delay_));
}
- dispatcher->CompileNext(dispatcher->NextInput(true));
+ dispatcher_->CompileNext(dispatcher_->NextInput(true));
}
{
- base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
- if (--dispatcher->ref_count_ == 0) {
- dispatcher->ref_count_zero_.NotifyOne();
+ base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
+ if (--dispatcher_->ref_count_ == 0) {
+ dispatcher_->ref_count_zero_.NotifyOne();
}
}
}
Isolate* isolate_;
+ OptimizingCompileDispatcher* dispatcher_;
DISALLOW_COPY_AND_ASSIGN(CompileTask);
};
@@ -222,14 +222,14 @@ void OptimizingCompileDispatcher::QueueForOptimization(CompilationJob* job) {
blocked_jobs_++;
} else {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompileTask(isolate_), v8::Platform::kShortRunningTask);
+ new CompileTask(isolate_, this), v8::Platform::kShortRunningTask);
}
}
void OptimizingCompileDispatcher::Unblock() {
while (blocked_jobs_ > 0) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompileTask(isolate_), v8::Platform::kShortRunningTask);
+ new CompileTask(isolate_, this), v8::Platform::kShortRunningTask);
blocked_jobs_--;
}
}
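The CompileTask changes keep the ref-counting handshake intact: the constructor bumps ref_count_ under ref_count_mutex_, and the end of Run() decrements it and signals ref_count_zero_ so the dispatcher can block until every in-flight task has drained. The same handshake with standard primitives, as a sketch rather than V8's base:: types:

#include <condition_variable>
#include <mutex>
#include <thread>

class TaskTracker {
 public:
  void TaskStarted() {   // CompileTask constructor
    std::lock_guard<std::mutex> lock(mutex_);
    ++ref_count_;
  }
  void TaskFinished() {  // end of CompileTask::Run()
    std::lock_guard<std::mutex> lock(mutex_);
    if (--ref_count_ == 0) zero_.notify_one();
  }
  void WaitUntilIdle() {  // dispatcher shutdown path
    std::unique_lock<std::mutex> lock(mutex_);
    zero_.wait(lock, [this] { return ref_count_ == 0; });
  }

 private:
  std::mutex mutex_;
  std::condition_variable zero_;
  int ref_count_ = 0;
};

int main() {
  TaskTracker tracker;
  tracker.TaskStarted();
  std::thread worker([&] { tracker.TaskFinished(); });
  tracker.WaitUntilIdle();  // returns once the worker has finished
  worker.join();
}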
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 7e08161517..5a9486d177 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -12,6 +12,7 @@
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/flags.h"
+#include "src/globals.h"
#include "src/list.h"
namespace v8 {
@@ -20,7 +21,7 @@ namespace internal {
class CompilationJob;
class SharedFunctionInfo;
-class OptimizingCompileDispatcher {
+class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
public:
enum class BlockingBehavior { kBlock, kDontBlock };
@@ -38,9 +39,9 @@ class OptimizingCompileDispatcher {
~OptimizingCompileDispatcher();
- void Run();
void Stop();
void Flush(BlockingBehavior blocking_behavior);
+ // Takes ownership of |job|.
void QueueForOptimization(CompilationJob* job);
void Unblock();
void InstallOptimizedFunctions();
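The new comment on QueueForOptimization documents an ownership transfer across a raw-pointer API. A sketch of what that contract looks like from the caller's side (hypothetical types; the caller releases its unique_ptr and must not delete the job afterwards):

#include <memory>
#include <vector>

struct CompilationJobLike { /* ... */ };

struct QueueLike {
  // Takes ownership of |job|, as the comment above documents.
  void QueueForOptimization(CompilationJobLike* job) {
    owned_.emplace_back(job);  // the queue deletes it eventually
  }
  std::vector<std::unique_ptr<CompilationJobLike>> owned_;
};

int main() {
  QueueLike queue;
  auto job = std::make_unique<CompilationJobLike>();
  queue.QueueForOptimization(job.release());  // caller must not delete it
}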
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 5e6ae0106c..6767c75817 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -9,12 +9,14 @@
#include "src/asmjs/asm-js.h"
#include "src/asmjs/asm-typer.h"
+#include "src/assembler-inl.h"
#include "src/ast/ast-numbering.h"
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
+#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler/pipeline.h"
#include "src/crankshaft/hydrogen.h"
@@ -38,6 +40,19 @@
namespace v8 {
namespace internal {
+// A wrapper around a ParseInfo that detaches the parser handles from the
+// underlying DeferredHandleScope and stores them in info_ on destruction.
+class ParseHandleScope final {
+ public:
+ explicit ParseHandleScope(ParseInfo* info)
+ : deferred_(info->isolate()), info_(info) {}
+ ~ParseHandleScope() { info_->set_deferred_handles(deferred_.Detach()); }
+
+ private:
+ DeferredHandleScope deferred_;
+ ParseInfo* info_;
+};
+
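ParseHandleScope above is a detach-on-destruction scope: handles created while it is alive are collected and, when the scope dies, handed to the ParseInfo so they outlive parsing. The idiom in miniature (Detach()/set_deferred_handles reduced to a vector handoff; names are illustrative):

#include <string>
#include <utility>
#include <vector>

struct InfoLike {
  std::vector<std::string> deferred;  // handles that must outlive the scope
};

class DetachingScope {
 public:
  explicit DetachingScope(InfoLike* info) : info_(info) {}
  ~DetachingScope() { info_->deferred = std::move(collected_); }  // Detach()
  void Create(std::string handle) { collected_.push_back(std::move(handle)); }

 private:
  InfoLike* info_;
  std::vector<std::string> collected_;
};

int main() {
  InfoLike info;
  {
    DetachingScope scope(&info);
    scope.Create("shared_function_info");  // created during parsing
  }
  // The scope is gone, but the handle now lives on in the info.
  return info.deferred.size() == 1 ? 0 : 1;
}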
// A wrapper around a CompilationInfo that detaches the Handles from
// the underlying DeferredHandleScope and stores them in info_ on
// destruction.
@@ -313,18 +328,19 @@ void EnsureFeedbackMetadata(CompilationInfo* info) {
}
bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
- bool optimization_disabled = shared->optimization_disabled();
+ if (shared->optimization_disabled()) {
+ return false;
+ }
+
bool must_use_ignition_turbo = shared->must_use_ignition_turbo();
// Check the enabling conditions for Turbofan.
// 1. "use asm" code.
- bool is_turbofanable_asm =
- FLAG_turbo_asm && shared->asm_function() && !optimization_disabled;
+ bool is_turbofanable_asm = FLAG_turbo_asm && shared->asm_function();
// 2. Fallback for features unsupported by Crankshaft.
bool is_unsupported_by_crankshaft_but_turbofanable =
- must_use_ignition_turbo && strcmp(FLAG_turbo_filter, "~~") == 0 &&
- !optimization_disabled;
+ must_use_ignition_turbo && strcmp(FLAG_turbo_filter, "~~") == 0;
// 3. Explicitly enabled by the command-line filter.
bool passes_turbo_filter = shared->PassesFilter(FLAG_turbo_filter);
@@ -333,10 +349,8 @@ bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
passes_turbo_filter;
}
-bool ShouldUseIgnition(CompilationInfo* info) {
- DCHECK(info->has_shared_info());
- Handle<SharedFunctionInfo> shared = info->shared_info();
-
+bool ShouldUseIgnition(Handle<SharedFunctionInfo> shared,
+ bool marked_as_debug) {
// Code which can't be supported by the old pipeline should use Ignition.
if (shared->must_use_ignition_turbo()) return true;
@@ -355,7 +369,7 @@ bool ShouldUseIgnition(CompilationInfo* info) {
// When requesting debug code as a replacement for existing code, we provide
// the same kind as the existing code (to prevent implicit tier-change).
- if (info->is_debug() && shared->is_compiled()) {
+ if (marked_as_debug && shared->is_compiled()) {
return !shared->HasBaselineCode();
}
@@ -363,16 +377,29 @@ bool ShouldUseIgnition(CompilationInfo* info) {
if (UseTurboFan(shared)) return true;
// Only use Ignition for any other function if FLAG_ignition is true.
- if (!FLAG_ignition) return false;
+ return FLAG_ignition;
+}
- // Checks whether top level functions should be passed by the filter.
- if (shared->is_toplevel()) {
- Vector<const char> filter = CStrVector(FLAG_ignition_filter);
- return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
- }
+bool ShouldUseIgnition(CompilationInfo* info) {
+ DCHECK(info->has_shared_info());
+ return ShouldUseIgnition(info->shared_info(), info->is_debug());
+}
- // Finally respect the filter.
- return shared->PassesFilter(FLAG_ignition_filter);
+bool UseAsmWasm(DeclarationScope* scope, Handle<SharedFunctionInfo> shared_info,
+ bool is_debug) {
+ return FLAG_validate_asm && scope->asm_module() &&
+ !shared_info->is_asm_wasm_broken() && !is_debug;
+}
+
+bool UseCompilerDispatcher(Compiler::ConcurrencyMode inner_function_mode,
+ CompilerDispatcher* dispatcher,
+ DeclarationScope* scope,
+ Handle<SharedFunctionInfo> shared_info,
+ bool is_debug, bool will_serialize) {
+ return FLAG_compiler_dispatcher_eager_inner &&
+ inner_function_mode == Compiler::CONCURRENT &&
+ dispatcher->IsEnabled() && !is_debug && !will_serialize &&
+ !UseAsmWasm(scope, shared_info, is_debug);
}
CompilationJob* GetUnoptimizedCompilationJob(CompilationInfo* info) {
@@ -427,13 +454,30 @@ void InstallUnoptimizedCode(CompilationInfo* info) {
CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
CompilationJob::Status status = job->FinalizeJob();
if (status == CompilationJob::SUCCEEDED) {
- EnsureFeedbackMetadata(job->info());
- InstallUnoptimizedCode(job->info());
+ CompilationInfo* info = job->info();
+ EnsureFeedbackMetadata(info);
+ DCHECK(!info->code().is_null());
+ if (info->parse_info()->literal()->should_be_used_once_hint()) {
+ info->code()->MarkToBeExecutedOnce(info->isolate());
+ }
+ InstallUnoptimizedCode(info);
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, info);
job->RecordUnoptimizedCompilationStats();
}
return status;
}
+void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
+ Handle<SharedFunctionInfo> shared_info) {
+ shared_info->set_ast_node_count(literal->ast_node_count());
+ if (literal->dont_optimize_reason() != kNoReason) {
+ shared_info->DisableOptimization(literal->dont_optimize_reason());
+ }
+ if (literal->flags() & AstProperties::kMustUseIgnitionTurbo) {
+ shared_info->set_must_use_ignition_turbo(true);
+ }
+}
+
bool Renumber(ParseInfo* parse_info,
Compiler::EagerInnerFunctionLiterals* eager_literals) {
RuntimeCallTimerScope runtimeTimer(parse_info->isolate(),
@@ -443,23 +487,15 @@ bool Renumber(ParseInfo* parse_info,
parse_info->zone(), parse_info->literal(), eager_literals)) {
return false;
}
- Handle<SharedFunctionInfo> shared_info = parse_info->shared_info();
- if (!shared_info.is_null()) {
- FunctionLiteral* lit = parse_info->literal();
- shared_info->set_ast_node_count(lit->ast_node_count());
- if (lit->dont_optimize_reason() != kNoReason) {
- shared_info->DisableOptimization(lit->dont_optimize_reason());
- }
- if (lit->flags() & AstProperties::kMustUseIgnitionTurbo) {
- shared_info->set_must_use_ignition_turbo(true);
- }
+ if (!parse_info->shared_info().is_null()) {
+ SetSharedFunctionFlagsFromLiteral(parse_info->literal(),
+ parse_info->shared_info());
}
return true;
}
bool GenerateUnoptimizedCode(CompilationInfo* info) {
- if (FLAG_validate_asm && info->scope()->asm_module() &&
- !info->shared_info()->is_asm_wasm_broken() && !info->is_debug()) {
+ if (UseAsmWasm(info->scope(), info->shared_info(), info->is_debug())) {
EnsureFeedbackMetadata(info);
MaybeHandle<FixedArray> wasm_data;
wasm_data = AsmJs::CompileAsmViaWasm(info);
@@ -481,71 +517,111 @@ bool GenerateUnoptimizedCode(CompilationInfo* info) {
return true;
}
-bool CompileUnoptimizedInnerFunctionsRecursively(
- ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* literals,
- CompilationInfo* outer_info) {
+bool CompileUnoptimizedInnerFunctions(
+ Compiler::EagerInnerFunctionLiterals* literals,
+ Compiler::ConcurrencyMode inner_function_mode,
+ std::shared_ptr<Zone> parse_zone, CompilationInfo* outer_info) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileUnoptimizedInnerFunctions");
Isolate* isolate = outer_info->isolate();
Handle<Script> script = outer_info->script();
+ bool is_debug = outer_info->is_debug();
+ bool will_serialize = outer_info->will_serialize();
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::CompileInnerFunction);
for (auto it : *literals) {
FunctionLiteral* literal = it->value();
-
- // Find any previously allocated shared function info for the given literal.
- Handle<SharedFunctionInfo> shared;
- MaybeHandle<SharedFunctionInfo> maybe_existing =
- script->FindSharedFunctionInfo(isolate, literal);
- if (maybe_existing.ToHandle(&shared)) {
- DCHECK(!shared->is_toplevel());
- // If we found an existing shared function info with compiled code,
- // we are done.
- if (shared->is_compiled()) continue;
+ Handle<SharedFunctionInfo> shared =
+ Compiler::GetSharedFunctionInfo(literal, script, outer_info);
+ if (shared->is_compiled()) continue;
+
+ // The {literal} has already been numbered because AstNumbering descends into
+ // eagerly compiled function literals.
+ SetSharedFunctionFlagsFromLiteral(literal, shared);
+
+ // Try to enqueue the eager function on the compiler dispatcher.
+ CompilerDispatcher* dispatcher = isolate->compiler_dispatcher();
+ if (UseCompilerDispatcher(inner_function_mode, dispatcher, literal->scope(),
+ shared, is_debug, will_serialize) &&
+ dispatcher->EnqueueAndStep(outer_info->script(), shared, literal,
+ parse_zone,
+ outer_info->parse_info()->deferred_handles(),
+ outer_info->deferred_handles())) {
+ // If we have successfully queued up the function for compilation on the
+ // compiler dispatcher, then we are done.
+ continue;
} else {
- shared =
- isolate->factory()->NewSharedFunctionInfoForLiteral(literal, script);
- shared->set_is_toplevel(false);
- }
-
- Zone zone(isolate->allocator(), ZONE_NAME);
- ParseInfo parse_info(&zone, script);
- parse_info.set_literal(literal);
- parse_info.set_shared_info(shared);
- parse_info.set_function_literal_id(shared->function_literal_id());
- parse_info.set_language_mode(literal->scope()->language_mode());
- parse_info.set_ast_value_factory(
- outer_info->parse_info()->ast_value_factory());
- parse_info.set_ast_value_factory_owned(false);
-
- CompilationInfo info(&parse_info, Handle<JSFunction>::null());
- if (outer_info->will_serialize()) info.PrepareForSerializing();
- if (outer_info->is_debug()) info.MarkAsDebug();
-
- Compiler::EagerInnerFunctionLiterals inner_literals;
- if (!Renumber(&parse_info, &inner_literals) ||
- !CompileUnoptimizedInnerFunctionsRecursively(&inner_literals,
- outer_info) ||
- !GenerateUnoptimizedCode(&info)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return false;
- }
-
- DCHECK(!info.code().is_null());
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, &info);
- if (literal->should_be_used_once_hint()) {
- info.code()->MarkToBeExecutedOnce(isolate);
+ // Otherwise generate unoptimized code now.
+ ParseInfo parse_info(script);
+ CompilationInfo info(parse_info.zone(), &parse_info,
+ Handle<JSFunction>::null());
+
+ parse_info.set_literal(literal);
+ parse_info.set_shared_info(shared);
+ parse_info.set_function_literal_id(shared->function_literal_id());
+ parse_info.set_language_mode(literal->scope()->language_mode());
+ parse_info.set_ast_value_factory(
+ outer_info->parse_info()->ast_value_factory());
+ parse_info.set_ast_value_factory_owned(false);
+
+ if (will_serialize) info.PrepareForSerializing();
+ if (is_debug) info.MarkAsDebug();
+
+ if (!GenerateUnoptimizedCode(&info)) {
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return false;
+ }
}
}
return true;
}
-bool CompileUnoptimizedCode(CompilationInfo* info) {
+bool InnerFunctionIsAsmModule(
+ ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* literals) {
+ for (auto it : *literals) {
+ FunctionLiteral* literal = it->value();
+ if (literal->scope()->IsAsmModule()) return true;
+ }
+ return false;
+}
+
+bool CompileUnoptimizedCode(CompilationInfo* info,
+ Compiler::ConcurrencyMode inner_function_mode) {
Isolate* isolate = info->isolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
Compiler::EagerInnerFunctionLiterals inner_literals;
- if (!Compiler::Analyze(info->parse_info(), &inner_literals) ||
- !CompileUnoptimizedInnerFunctionsRecursively(&inner_literals, info) ||
+ {
+ std::unique_ptr<CompilationHandleScope> compilation_handle_scope;
+ if (inner_function_mode == Compiler::CONCURRENT) {
+ compilation_handle_scope.reset(new CompilationHandleScope(info));
+ }
+ if (!Compiler::Analyze(info->parse_info(), &inner_literals)) {
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return false;
+ }
+ }
+
+ // Disable concurrent inner compilation for asm-wasm code.
+ // TODO(rmcilroy,bradnelson): Remove this AsmWasm check once the asm-wasm
+ // builder doesn't do parsing when visiting function declarations.
+ if (info->scope()->IsAsmModule() ||
+ InnerFunctionIsAsmModule(&inner_literals)) {
+ inner_function_mode = Compiler::NOT_CONCURRENT;
+ }
+
+ std::shared_ptr<Zone> parse_zone;
+ if (inner_function_mode == Compiler::CONCURRENT) {
+ // Seal the parse zone so that it can be shared by parallel inner function
+ // compilation jobs.
+ DCHECK_NE(info->parse_info()->zone(), info->zone());
+ parse_zone = info->parse_info()->zone_shared();
+ parse_zone->Seal();
+ }
+
+ if (!CompileUnoptimizedInnerFunctions(&inner_literals, inner_function_mode,
+ parse_zone, info) ||
!GenerateUnoptimizedCode(info)) {
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return false;
@@ -568,14 +644,27 @@ void EnsureSharedFunctionInfosArrayOnScript(ParseInfo* info) {
info->script()->set_shared_function_infos(*infos);
}
-MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
+MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(
+ CompilationInfo* info, Compiler::ConcurrencyMode inner_function_mode) {
RuntimeCallTimerScope runtimeTimer(
info->isolate(), &RuntimeCallStats::CompileGetUnoptimizedCode);
VMState<COMPILER> state(info->isolate());
PostponeInterruptsScope postpone(info->isolate());
- // Parse and update CompilationInfo with the results.
- if (!parsing::ParseAny(info->parse_info())) return MaybeHandle<Code>();
+ // Parse and update ParseInfo with the results.
+ {
+ if (!parsing::ParseAny(info->parse_info(),
+ inner_function_mode != Compiler::CONCURRENT)) {
+ return MaybeHandle<Code>();
+ }
+
+ if (inner_function_mode == Compiler::CONCURRENT) {
+ ParseHandleScope parse_handles(info->parse_info());
+ info->parse_info()->ReopenHandlesInNewHandleScope();
+ info->parse_info()->ast_value_factory()->Internalize(info->isolate());
+ }
+ }
+
if (info->parse_info()->is_toplevel()) {
EnsureSharedFunctionInfosArrayOnScript(info->parse_info());
}
@@ -583,7 +672,9 @@ MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
info->literal()->language_mode());
// Compile either unoptimized code or bytecode for the interpreter.
- if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
+ if (!CompileUnoptimizedCode(info, inner_function_mode)) {
+ return MaybeHandle<Code>();
+ }
// Record the function compilation event.
RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
@@ -598,14 +689,13 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
&RuntimeCallStats::CompileGetFromOptimizedCodeMap);
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
- CodeAndLiterals cached = shared->SearchOptimizedCodeMap(
+ Code* code = shared->SearchOptimizedCodeMap(
function->context()->native_context(), osr_ast_id);
- if (cached.code != nullptr) {
+ if (code != nullptr) {
// Caching of optimized code enabled and optimized code found.
- if (cached.literals != nullptr) function->set_literals(cached.literals);
- DCHECK(!cached.code->marked_for_deoptimization());
+ DCHECK(!code->marked_for_deoptimization());
DCHECK(function->shared()->is_compiled());
- return Handle<Code>(cached.code);
+ return Handle<Code>(code);
}
return MaybeHandle<Code>();
}
@@ -627,10 +717,9 @@ void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
// Cache optimized context-specific code.
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
- Handle<LiteralsArray> literals(function->literals());
Handle<Context> native_context(function->context()->native_context());
SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
- literals, info->osr_ast_id());
+ info->osr_ast_id());
}
bool GetOptimizedCodeNow(CompilationJob* job) {
@@ -760,8 +849,12 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
bool use_turbofan = UseTurboFan(shared) || ignition_osr;
+ bool has_script = shared->script()->IsScript();
+ // BUG(5946): This DCHECK makes certain that we only tolerate the lack of a
+ // script for functions that will run through Ignition (i.e. have bytecode).
+ DCHECK_IMPLIES(!has_script, ShouldUseIgnition(shared, false));
std::unique_ptr<CompilationJob> job(
- use_turbofan ? compiler::Pipeline::NewCompilationJob(function)
+ use_turbofan ? compiler::Pipeline::NewCompilationJob(function, has_script)
: new HCompilationJob(function));
CompilationInfo* info = job->info();
ParseInfo* parse_info = info->parse_info();
@@ -866,10 +959,8 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
} else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
job->RecordOptimizedCompilationStats();
RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
- if (shared
- ->SearchOptimizedCodeMap(info->context()->native_context(),
- info->osr_ast_id())
- .code == nullptr) {
+ if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
+ info->osr_ast_id()) == nullptr) {
InsertCodeIntoOptimizedCodeMap(info);
}
if (FLAG_trace_opt) {
@@ -896,9 +987,8 @@ MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
- Zone zone(isolate->allocator(), ZONE_NAME);
- ParseInfo parse_info(&zone, handle(function->shared()));
- CompilationInfo info(&parse_info, function);
+ ParseInfo parse_info(handle(function->shared()));
+ CompilationInfo info(parse_info.zone(), &parse_info, function);
DCHECK(function->shared()->is_compiled());
@@ -987,17 +1077,10 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
switch (Compiler::NextCompilationTier(*function)) {
case Compiler::BASELINE: {
- if (FLAG_trace_opt) {
- PrintF("[recompiling function ");
- function->ShortPrint();
- PrintF(
- " to baseline eagerly (shared function marked for tier up)]\n");
- }
-
- Handle<Code> code;
- if (GetBaselineCode(function).ToHandle(&code)) {
- return code;
- }
+ // We don't try to handle baseline here because GetBaselineCode() doesn't
+ // handle top-level code, and we aren't supporting the hybrid pipeline going
+ // forward (where Ignition is a first tier followed by full-code).
break;
}
case Compiler::OPTIMIZED: {
@@ -1030,11 +1113,12 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
return entry;
}
- Zone zone(isolate->allocator(), ZONE_NAME);
- ParseInfo parse_info(&zone, handle(function->shared()));
- CompilationInfo info(&parse_info, function);
+ ParseInfo parse_info(handle(function->shared()));
+ Zone compile_zone(isolate->allocator(), ZONE_NAME);
+ CompilationInfo info(&compile_zone, &parse_info, function);
Handle<Code> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCode(&info), Code);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, GetUnoptimizedCode(&info, Compiler::CONCURRENT), Code);
if (FLAG_always_opt && !info.shared_info()->HasAsmWasmData()) {
Handle<Code> opt_code;
@@ -1069,9 +1153,16 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Handle<SharedFunctionInfo> result;
{ VMState<COMPILER> state(info->isolate());
- if (parse_info->literal() == nullptr &&
- !parsing::ParseProgram(parse_info)) {
- return Handle<SharedFunctionInfo>::null();
+ if (parse_info->literal() == nullptr) {
+ if (!parsing::ParseProgram(parse_info, false)) {
+ return Handle<SharedFunctionInfo>::null();
+ }
+
+ {
+ ParseHandleScope parse_handles(parse_info);
+ parse_info->ReopenHandlesInNewHandleScope();
+ parse_info->ast_value_factory()->Internalize(info->isolate());
+ }
}
EnsureSharedFunctionInfosArrayOnScript(parse_info);
@@ -1095,7 +1186,7 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
parse_info->set_function_literal_id(result->function_literal_id());
// Compile the code.
- if (!CompileUnoptimizedCode(info)) {
+ if (!CompileUnoptimizedCode(info, Compiler::CONCURRENT)) {
return Handle<SharedFunctionInfo>::null();
}
@@ -1130,7 +1221,9 @@ bool Compiler::Analyze(ParseInfo* info,
&RuntimeCallStats::CompileAnalyse);
if (!Rewriter::Rewrite(info)) return false;
DeclarationScope::Analyze(info, AnalyzeMode::kRegular);
- if (!Renumber(info, eager_literals)) return false;
+ if (!Renumber(info, eager_literals)) {
+ return false;
+ }
DCHECK_NOT_NULL(info->scope());
return true;
}
@@ -1149,13 +1242,25 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
Isolate* isolate = function->GetIsolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
- // Start a compilation.
+ CompilerDispatcher* dispatcher = isolate->compiler_dispatcher();
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
Handle<Code> code;
- if (!GetLazyCode(function).ToHandle(&code)) {
- if (flag == CLEAR_EXCEPTION) {
- isolate->clear_pending_exception();
+ if (dispatcher->IsEnqueued(shared)) {
+ if (!dispatcher->FinishNow(shared)) {
+ if (flag == CLEAR_EXCEPTION) {
+ isolate->clear_pending_exception();
+ }
+ return false;
+ }
+ code = handle(shared->code(), isolate);
+ } else {
+ // Start a compilation.
+ if (!GetLazyCode(function).ToHandle(&code)) {
+ if (flag == CLEAR_EXCEPTION) {
+ isolate->clear_pending_exception();
+ }
+ return false;
}
- return false;
}
// Install code on closure.
@@ -1202,21 +1307,11 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
// Start a compilation.
Handle<Code> code;
if (!GetOptimizedCode(function, mode).ToHandle(&code)) {
- // Optimization failed, get unoptimized code.
+ // Optimization failed, get unoptimized code. Unoptimized code must exist
+ // already if we are optimizing.
DCHECK(!isolate->has_pending_exception());
- if (function->shared()->is_compiled()) {
- code = handle(function->shared()->code(), isolate);
- } else if (function->shared()->HasBytecodeArray()) {
- code = isolate->builtins()->InterpreterEntryTrampoline();
- function->shared()->ReplaceCode(*code);
- } else {
- Zone zone(isolate->allocator(), ZONE_NAME);
- ParseInfo parse_info(&zone, handle(function->shared()));
- CompilationInfo info(&parse_info, function);
- if (!GetUnoptimizedCode(&info).ToHandle(&code)) {
- return false;
- }
- }
+ DCHECK(function->shared()->is_compiled());
+ code = handle(function->shared()->code(), isolate);
}
// Install code on closure.
@@ -1235,11 +1330,11 @@ bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
DCHECK(AllowCompilation::IsAllowed(isolate));
// Start a compilation.
- Zone zone(isolate->allocator(), ZONE_NAME);
- ParseInfo parse_info(&zone, shared);
- CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+ ParseInfo parse_info(shared);
+ CompilationInfo info(parse_info.zone(), &parse_info,
+ Handle<JSFunction>::null());
info.MarkAsDebug();
- if (GetUnoptimizedCode(&info).is_null()) {
+ if (GetUnoptimizedCode(&info, Compiler::NOT_CONCURRENT).is_null()) {
isolate->clear_pending_exception();
return false;
}
@@ -1263,9 +1358,9 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
script->set_shared_function_infos(isolate->heap()->empty_fixed_array());
// Start a compilation.
- Zone zone(isolate->allocator(), ZONE_NAME);
- ParseInfo parse_info(&zone, script);
- CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+ ParseInfo parse_info(script);
+ Zone compile_zone(isolate->allocator(), ZONE_NAME);
+ CompilationInfo info(&compile_zone, &parse_info, Handle<JSFunction>::null());
info.MarkAsDebug();
// TODO(635): support extensions.
@@ -1275,7 +1370,7 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception());
infos = LiveEditFunctionTracker::Collect(parse_info.literal(), script,
- &zone, isolate);
+ parse_info.zone(), isolate);
}
// Restore the original function info list in order to remain side-effect
@@ -1288,7 +1383,12 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
bool Compiler::EnsureBytecode(CompilationInfo* info) {
if (!info->shared_info()->is_compiled()) {
- if (GetUnoptimizedCode(info).is_null()) return false;
+ CompilerDispatcher* dispatcher = info->isolate()->compiler_dispatcher();
+ if (dispatcher->IsEnqueued(info->shared_info())) {
+ if (!dispatcher->FinishNow(info->shared_info())) return false;
+ } else if (GetUnoptimizedCode(info, Compiler::NOT_CONCURRENT).is_null()) {
+ return false;
+ }
}
DCHECK(info->shared_info()->is_compiled());
@@ -1304,9 +1404,16 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
DCHECK_NOT_NULL(info->literal());
DCHECK_NOT_NULL(info->scope());
Handle<SharedFunctionInfo> shared = info->shared_info();
+
+ CompilerDispatcher* dispatcher = info->isolate()->compiler_dispatcher();
+ if (dispatcher->IsEnqueued(shared)) {
+ if (!dispatcher->FinishNow(shared)) return false;
+ }
+
if (!shared->has_deoptimization_support()) {
- Zone zone(info->isolate()->allocator(), ZONE_NAME);
- CompilationInfo unoptimized(info->parse_info(), info->closure());
+ Zone compile_zone(info->isolate()->allocator(), ZONE_NAME);
+ CompilationInfo unoptimized(&compile_zone, info->parse_info(),
+ info->closure());
unoptimized.EnableDeoptimizationSupport();
// Don't generate full-codegen code for functions it can't support.
@@ -1370,22 +1477,46 @@ Compiler::CompilationTier Compiler::NextCompilationTier(JSFunction* function) {
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
- ParseRestriction restriction, int eval_scope_position, int eval_position,
- int line_offset, int column_offset, Handle<Object> script_name,
+ ParseRestriction restriction, int parameters_end_pos,
+ int eval_scope_position, int eval_position, int line_offset,
+ int column_offset, Handle<Object> script_name,
ScriptOriginOptions options) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_eval_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
+ // The cache lookup key needs to be aware of the separation between the
+ // parameters and the body to prevent this valid invocation:
+ // Function("", "function anonymous(\n/**/) {\n}");
+ // from adding an entry that falsely approves this invalid invocation:
+ // Function("\n/**/) {\nfunction anonymous(", "}");
+ // The actual eval_scope_position for indirect eval and CreateDynamicFunction
+ // is unused (just 0), which means it's an available field to use to indicate
+ // this separation. But to make sure we're not causing other false hits, we
+ // negate the scope position.
+ int position = eval_scope_position;
+ if (FLAG_harmony_function_tostring &&
+ restriction == ONLY_SINGLE_FUNCTION_LITERAL &&
+ parameters_end_pos != kNoSourcePosition) {
+ // Use the parameters_end_pos as the eval_scope_position in the eval cache.
+ DCHECK_EQ(eval_scope_position, 0);
+ position = -parameters_end_pos;
+ }
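  // A worked example of the collision (editorial sketch; the offsets are
  // made up, not taken from this patch). Both calls
  //
  //   new Function("", "function anonymous(\n/**/) {\n}")
  //   new Function("\n/**/) {\nfunction anonymous(", "}")
  //
  // assemble to the same eval source, so the source string alone is an
  // ambiguous cache key. If the first call's parameter list ends at source
  // offset 23 and the second one's at offset 40, their cache positions
  // become -23 and -40, and the entry created by the valid call can no
  // longer approve the invalid one. Negation keeps these synthetic
  // positions disjoint from real eval scope positions, which are >= 0.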
CompilationCache* compilation_cache = isolate->compilation_cache();
- MaybeHandle<SharedFunctionInfo> maybe_shared_info =
- compilation_cache->LookupEval(source, outer_info, context, language_mode,
- eval_scope_position);
+ InfoVectorPair eval_result = compilation_cache->LookupEval(
+ source, outer_info, context, language_mode, position);
Handle<SharedFunctionInfo> shared_info;
+ if (eval_result.has_shared()) {
+ shared_info = Handle<SharedFunctionInfo>(eval_result.shared(), isolate);
+ }
+ Handle<Cell> vector;
+ if (eval_result.has_vector()) {
+ vector = Handle<Cell>(eval_result.vector(), isolate);
+ }
Handle<Script> script;
- if (!maybe_shared_info.ToHandle(&shared_info)) {
+ if (!eval_result.has_shared()) {
script = isolate->factory()->NewScript(source);
if (isolate->NeedsSourcePositionsForProfiling()) {
Script::InitLineEnds(script);
@@ -1399,32 +1530,51 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
Script::SetEvalOrigin(script, outer_info, eval_position);
- Zone zone(isolate->allocator(), ZONE_NAME);
- ParseInfo parse_info(&zone, script);
- CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+ ParseInfo parse_info(script);
+ Zone compile_zone(isolate->allocator(), ZONE_NAME);
+ CompilationInfo info(&compile_zone, &parse_info,
+ Handle<JSFunction>::null());
parse_info.set_eval();
parse_info.set_language_mode(language_mode);
parse_info.set_parse_restriction(restriction);
+ parse_info.set_parameters_end_pos(parameters_end_pos);
if (!context->IsNativeContext()) {
parse_info.set_outer_scope_info(handle(context->scope_info()));
}
shared_info = CompileToplevel(&info);
-
if (shared_info.is_null()) {
return MaybeHandle<JSFunction>();
- } else {
- // If caller is strict mode, the result must be in strict mode as well.
- DCHECK(is_sloppy(language_mode) ||
- is_strict(shared_info->language_mode()));
- compilation_cache->PutEval(source, outer_info, context, shared_info,
- eval_scope_position);
}
}
- Handle<JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ // If caller is strict mode, the result must be in strict mode as well.
+ DCHECK(is_sloppy(language_mode) || is_strict(shared_info->language_mode()));
+
+ Handle<JSFunction> result;
+ if (eval_result.has_shared()) {
+ if (eval_result.has_vector()) {
+ result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared_info, context, vector, NOT_TENURED);
+ } else {
+ result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, NOT_TENURED);
+ JSFunction::EnsureLiterals(result);
+ // Make sure to cache this result.
+ Handle<Cell> new_vector(result->feedback_vector_cell(), isolate);
+ compilation_cache->PutEval(source, outer_info, context, shared_info,
+ new_vector, eval_scope_position);
+ }
+ } else {
+ result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared_info, context, NOT_TENURED);
+ JSFunction::EnsureLiterals(result);
+ // Add the SharedFunctionInfo and the feedback vector to the eval cache if
+ // we didn't retrieve them from there.
+ Handle<Cell> vector(result->feedback_vector_cell(), isolate);
+ compilation_cache->PutEval(source, outer_info, context, shared_info, vector,
+ eval_scope_position);
+ }
// OnAfterCompile has to be called after we create the JSFunction, which we
// may require to recompile the eval for debugging, if we find a function
@@ -1465,7 +1615,7 @@ bool ContainsAsmModule(Handle<Script> script) {
MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
Handle<Context> context, Handle<String> source,
- ParseRestriction restriction) {
+ ParseRestriction restriction, int parameters_end_pos) {
Isolate* const isolate = context->GetIsolate();
Handle<Context> native_context(context->native_context(), isolate);
@@ -1485,8 +1635,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
int eval_position = kNoSourcePosition;
Handle<SharedFunctionInfo> outer_info(native_context->closure()->shared());
return Compiler::GetFunctionFromEval(source, outer_info, native_context,
- SLOPPY, restriction, eval_scope_position,
- eval_position);
+ SLOPPY, restriction, parameters_end_pos,
+ eval_scope_position, eval_position);
}
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
@@ -1494,8 +1644,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
int column_offset, ScriptOriginOptions resource_options,
Handle<Object> source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
- ScriptCompiler::CompileOptions compile_options, NativesFlag natives,
- bool is_module) {
+ ScriptCompiler::CompileOptions compile_options, NativesFlag natives) {
Isolate* isolate = source->GetIsolate();
if (compile_options == ScriptCompiler::kNoCompileOptions) {
cached_data = NULL;
@@ -1518,14 +1667,14 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
CompilationCache* compilation_cache = isolate->compilation_cache();
// Do a lookup in the compilation cache but not for extensions.
- MaybeHandle<SharedFunctionInfo> maybe_result;
Handle<SharedFunctionInfo> result;
+ Handle<Cell> vector;
if (extension == NULL) {
// First check per-isolate compilation cache.
- maybe_result = compilation_cache->LookupScript(
+ InfoVectorPair pair = compilation_cache->LookupScript(
source, script_name, line_offset, column_offset, resource_options,
context, language_mode);
- if (maybe_result.is_null() && FLAG_serialize_toplevel &&
+ if (!pair.has_shared() && FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kConsumeCodeCache &&
!isolate->debug()->is_loaded()) {
// Then check cached code provided by embedder.
@@ -1534,14 +1683,27 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
&RuntimeCallStats::CompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
- Handle<SharedFunctionInfo> result;
+ Handle<SharedFunctionInfo> inner_result;
if (CodeSerializer::Deserialize(isolate, *cached_data, source)
- .ToHandle(&result)) {
+ .ToHandle(&inner_result)) {
// Promote to per-isolate compilation cache.
- compilation_cache->PutScript(source, context, language_mode, result);
- return result;
+ // TODO(mvstanton): create a feedback vector array here.
+ DCHECK(inner_result->is_compiled());
+ Handle<FeedbackVector> feedback_vector =
+ FeedbackVector::New(isolate, inner_result);
+ vector = isolate->factory()->NewCell(feedback_vector);
+ compilation_cache->PutScript(source, context, language_mode,
+ inner_result, vector);
+ return inner_result;
}
// Deserializer failed. Fall through to compile.
+ } else {
+ if (pair.has_shared()) {
+ result = Handle<SharedFunctionInfo>(pair.shared(), isolate);
+ }
+ if (pair.has_vector()) {
+ vector = Handle<Cell>(pair.vector(), isolate);
+ }
}
}
@@ -1551,7 +1713,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
timer.Start();
}
- if (!maybe_result.ToHandle(&result) ||
+ if (result.is_null() ||
(FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kProduceCodeCache)) {
// No cache entry found, or embedder wants a code cache. Compile the script.
@@ -1579,10 +1741,11 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
// Compile the function and add it to the cache.
- Zone zone(isolate->allocator(), ZONE_NAME);
- ParseInfo parse_info(&zone, script);
- CompilationInfo info(&parse_info, Handle<JSFunction>::null());
- if (is_module) parse_info.set_module();
+ ParseInfo parse_info(script);
+ Zone compile_zone(isolate->allocator(), ZONE_NAME);
+ CompilationInfo info(&compile_zone, &parse_info,
+ Handle<JSFunction>::null());
+ if (resource_options.IsModule()) parse_info.set_module();
if (compile_options != ScriptCompiler::kNoCompileOptions) {
parse_info.set_cached_data(cached_data);
}
@@ -1600,7 +1763,13 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
static_cast<LanguageMode>(parse_info.language_mode() | language_mode));
result = CompileToplevel(&info);
if (extension == NULL && !result.is_null()) {
- compilation_cache->PutScript(source, context, language_mode, result);
+ // We need a feedback vector.
+ DCHECK(result->is_compiled());
+ Handle<FeedbackVector> feedback_vector =
+ FeedbackVector::New(isolate, result);
+ vector = isolate->factory()->NewCell(feedback_vector);
+ compilation_cache->PutScript(source, context, language_mode, result,
+ vector);
if (FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kProduceCodeCache &&
!ContainsAsmModule(script)) {
@@ -1619,7 +1788,9 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
if (result.is_null()) {
- if (natives != EXTENSION_CODE) isolate->ReportPendingMessages();
+ if (natives != EXTENSION_CODE && natives != NATIVES_CODE) {
+ isolate->ReportPendingMessages();
+ }
} else {
isolate->debug()->OnAfterCompile(script);
}
@@ -1640,7 +1811,9 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForStreamedScript(
parse_info->set_language_mode(
static_cast<LanguageMode>(parse_info->language_mode() | language_mode));
- CompilationInfo compile_info(parse_info, Handle<JSFunction>::null());
+ Zone compile_zone(isolate->allocator(), ZONE_NAME);
+ CompilationInfo compile_info(&compile_zone, parse_info,
+ Handle<JSFunction>::null());
// The source was parsed lazily, so compiling for debugging is not possible.
DCHECK(!compile_info.is_debug());
@@ -1696,7 +1869,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
Handle<Code> code = Handle<Code>(fun->shared()->code());
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
- name, fun->shared()->num_literals(), FunctionKind::kNormalFunction, code,
+ name, FunctionKind::kNormalFunction, code,
Handle<ScopeInfo>(fun->shared()->scope_info()));
shared->set_outer_scope_info(fun->shared()->outer_scope_info());
shared->SetConstructStub(*construct_stub);
@@ -1737,13 +1910,8 @@ bool Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
return FinalizeOptimizedCompilationJob(job.get()) ==
CompilationJob::SUCCEEDED;
} else {
- if (FinalizeUnoptimizedCompilationJob(job.get()) ==
- CompilationJob::SUCCEEDED) {
- RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
- job->info());
- return true;
- }
- return false;
+ return FinalizeUnoptimizedCompilationJob(job.get()) ==
+ CompilationJob::SUCCEEDED;
}
}
@@ -1757,19 +1925,16 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
function->MarkForOptimization();
}
- CodeAndLiterals cached = shared->SearchOptimizedCodeMap(
+ Code* code = shared->SearchOptimizedCodeMap(
function->context()->native_context(), BailoutId::None());
- if (cached.code != nullptr) {
+ if (code != nullptr) {
// Caching of optimized code enabled and optimized code found.
- DCHECK(!cached.code->marked_for_deoptimization());
+ DCHECK(!code->marked_for_deoptimization());
DCHECK(function->shared()->is_compiled());
- function->ReplaceCode(cached.code);
+ function->ReplaceCode(code);
}
- if (cached.literals != nullptr) {
- DCHECK(shared->is_compiled());
- function->set_literals(cached.literals);
- } else if (shared->is_compiled()) {
+ if (shared->is_compiled()) {
// TODO(mvstanton): pass pretenure flag to EnsureLiterals.
JSFunction::EnsureLiterals(function);
}
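The compilation-cache plumbing above hinges on one new shape: a script-cache hit now hands back both the SharedFunctionInfo and the Cell wrapping its FeedbackVector. A minimal sketch of that pair, using plain pointer fields as a stand-in for the real InfoVectorPair accessors declared in compilation-cache.h:

// Sketch only; the real InfoVectorPair exposes shared() and vector()
// accessors rather than public fields.
struct InfoVectorPairSketch {
  SharedFunctionInfo* shared = nullptr;  // compiled toplevel code, if cached
  Cell* vector = nullptr;                // Cell holding the FeedbackVector
  bool has_shared() const { return shared != nullptr; }
  bool has_vector() const { return vector != nullptr; }
};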
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index dfbd520f9d..e26484a4a6 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -37,7 +37,7 @@ class ThreadedListZoneEntry;
// parameters which then can be executed. If the source code contains other
// functions, they might be compiled and allocated as part of the compilation
// of the source code or deferred for lazy compilation at a later point.
-class Compiler : public AllStatic {
+class V8_EXPORT_PRIVATE Compiler : public AllStatic {
public:
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
@@ -98,15 +98,15 @@ class Compiler : public AllStatic {
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
- ParseRestriction restriction, int eval_scope_position, int eval_position,
- int line_offset = 0, int column_offset = 0,
- Handle<Object> script_name = Handle<Object>(),
+ ParseRestriction restriction, int parameters_end_pos,
+ int eval_scope_position, int eval_position, int line_offset = 0,
+ int column_offset = 0, Handle<Object> script_name = Handle<Object>(),
ScriptOriginOptions options = ScriptOriginOptions());
// Create a (bound) function for a String source within a context for eval.
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromString(
Handle<Context> context, Handle<String> source,
- ParseRestriction restriction);
+ ParseRestriction restriction, int parameters_end_pos);
// Create a shared function info object for a String source within a context.
static Handle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
@@ -115,7 +115,7 @@ class Compiler : public AllStatic {
Handle<Object> source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options,
- NativesFlag is_natives_code, bool is_module);
+ NativesFlag is_natives_code);
// Create a shared function info object for a Script that has already been
// parsed while the script was being loaded from a streamed source.
@@ -154,7 +154,7 @@ class Compiler : public AllStatic {
//
// Each of the three phases can either fail or succeed. The current state of
// the job can be checked using {state()}.
-class CompilationJob {
+class V8_EXPORT_PRIVATE CompilationJob {
public:
enum Status { SUCCEEDED, FAILED };
enum class State {
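As the comment above says, a job advances through three phases, each of which can fail independently. A hedged driver sketch using the phase methods this header declares (PrepareJob, ExecuteJob, FinalizeJob, each returning a Status); treat it as illustrative rather than V8's exact call sequence:

// Illustrative: drive a CompilationJob to completion. ExecuteJob is the
// phase that may also run on a background thread; the other two must not.
bool RunToCompletion(CompilationJob* job) {
  if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
  if (job->ExecuteJob() != CompilationJob::SUCCEEDED) return false;
  return job->FinalizeJob() == CompilationJob::SUCCEEDED;
}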
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 9fd531c637..2722590c76 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -104,6 +104,15 @@ FieldAccess AccessBuilder::ForJSObjectOffset(
}
// static
+FieldAccess AccessBuilder::ForJSCollectionTable() {
+ FieldAccess access = {kTaggedBase, JSCollection::kTableOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
FieldAccess access = {
kTaggedBase, JSFunction::kPrototypeOrInitialMapOffset,
@@ -134,8 +143,8 @@ FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
}
// static
-FieldAccess AccessBuilder::ForJSFunctionLiterals() {
- FieldAccess access = {kTaggedBase, JSFunction::kLiteralsOffset,
+FieldAccess AccessBuilder::ForJSFunctionFeedbackVector() {
+ FieldAccess access = {kTaggedBase, JSFunction::kFeedbackVectorOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::Internal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -483,9 +492,9 @@ FieldAccess AccessBuilder::ForModuleRegularImports() {
// static
FieldAccess AccessBuilder::ForNameHashField() {
- FieldAccess access = {kTaggedBase, Name::kHashFieldOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::Uint32(),
+ FieldAccess access = {kTaggedBase, Name::kHashFieldOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Unsigned32(), MachineType::Uint32(),
kNoWriteBarrier};
return access;
}
@@ -521,6 +530,15 @@ FieldAccess AccessBuilder::ForConsStringSecond() {
}
// static
+FieldAccess AccessBuilder::ForThinStringActual() {
+ FieldAccess access = {kTaggedBase, ThinString::kActualOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForSlicedStringOffset() {
FieldAccess access = {kTaggedBase, SlicedString::kOffsetOffset,
Handle<Name>(), MaybeHandle<Map>(),
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index f76aedf5a9..9d23220e82 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -52,6 +52,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForJSObjectOffset(
int offset, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
+ // Provides access to JSCollection::table() field.
+ static FieldAccess ForJSCollectionTable();
+
// Provides access to JSFunction::prototype_or_initial_map() field.
static FieldAccess ForJSFunctionPrototypeOrInitialMap();
@@ -61,8 +64,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSFunction::shared() field.
static FieldAccess ForJSFunctionSharedFunctionInfo();
- // Provides access to JSFunction::literals() field.
- static FieldAccess ForJSFunctionLiterals();
+ // Provides access to JSFunction::feedback_vector() field.
+ static FieldAccess ForJSFunctionFeedbackVector();
// Provides access to JSFunction::code() field.
static FieldAccess ForJSFunctionCodeEntry();
@@ -172,6 +175,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to ConsString::second() field.
static FieldAccess ForConsStringSecond();
+ // Provides access to ThinString::actual() field.
+ static FieldAccess ForThinStringActual();
+
// Provides access to SlicedString::offset() field.
static FieldAccess ForSlicedStringOffset();
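These builders only describe a field layout; TurboFan lowerings consume them through the simplified LoadField operator. A hedged usage sketch for the new ThinString descriptor (the helper name and parameter plumbing are invented for illustration):

// Illustrative: load ThinString::actual through the new descriptor.
Node* LoadThinStringActual(Graph* graph, SimplifiedOperatorBuilder* simplified,
                           Node* thin_string, Node* effect, Node* control) {
  return graph->NewNode(
      simplified->LoadField(AccessBuilder::ForThinStringActual()),
      thin_string, effect, control);
}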
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index f23154aa45..8fef2f079c 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -80,11 +80,12 @@ PropertyAccessInfo PropertyAccessInfo::DataConstant(
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
- MapList const& receiver_maps, FieldIndex field_index,
- MachineRepresentation field_representation, Type* field_type,
- MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
+ PropertyConstness constness, MapList const& receiver_maps,
+ FieldIndex field_index, MachineRepresentation field_representation,
+ Type* field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
- return PropertyAccessInfo(holder, transition_map, field_index,
+ Kind kind = constness == kConst ? kDataConstantField : kDataField;
+ return PropertyAccessInfo(kind, holder, transition_map, field_index,
field_representation, field_type, field_map,
receiver_maps);
}
@@ -126,10 +127,10 @@ PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
field_type_(Type::Any()) {}
PropertyAccessInfo::PropertyAccessInfo(
- MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
+ Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
FieldIndex field_index, MachineRepresentation field_representation,
Type* field_type, MaybeHandle<Map> field_map, MapList const& receiver_maps)
- : kind_(kDataField),
+ : kind_(kind),
receiver_maps_(receiver_maps),
transition_map_(transition_map),
holder_(holder),
@@ -146,9 +147,11 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
case kInvalid:
break;
- case kDataField: {
+ case kDataField:
+ case kDataConstantField: {
// Check if we actually access the same field.
- if (this->transition_map_.address() == that->transition_map_.address() &&
+ if (this->kind_ == that->kind_ &&
+ this->transition_map_.address() == that->transition_map_.address() &&
this->field_index_ == that->field_index_ &&
this->field_map_.address() == that->field_map_.address() &&
this->field_type_->Is(that->field_type_) &&
@@ -338,8 +341,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
}
*access_info = PropertyAccessInfo::DataField(
- MapList{receiver_map}, field_index, field_representation,
- field_type, field_map, holder);
+ details.constness(), MapList{receiver_map}, field_index,
+ field_representation, field_type, field_map, holder);
return true;
} else {
DCHECK_EQ(kAccessor, details.kind());
@@ -350,6 +353,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
} else {
DCHECK_EQ(kDescriptor, details.location());
if (details.kind() == kData) {
+ DCHECK(!FLAG_track_constant_fields);
*access_info = PropertyAccessInfo::DataConstant(
MapList{receiver_map},
handle(descriptors->GetValue(number), isolate()), holder);
@@ -373,6 +377,17 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
}
+ if (access_mode == AccessMode::kLoad) {
+ Handle<Name> cached_property_name;
+ if (FunctionTemplateInfo::TryGetCachedPropertyName(isolate(),
+ accessor)
+ .ToHandle(&cached_property_name)) {
+ if (ComputePropertyAccessInfo(map, cached_property_name,
+ access_mode, access_info)) {
+ return true;
+ }
+ }
+ }
*access_info = PropertyAccessInfo::AccessorConstant(
MapList{receiver_map}, accessor, holder);
return true;
@@ -391,7 +406,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// Don't search on the prototype when storing in literals
if (access_mode == AccessMode::kStoreInLiteral) {
- return false;
+ return LookupTransition(receiver_map, name, holder, access_info);
}
// Don't lookup private symbols on the prototype chain.
@@ -490,8 +505,9 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
field_type = type_cache_.kJSArrayLengthType;
}
}
+ // Special fields are always mutable.
*access_info = PropertyAccessInfo::DataField(
- MapList{map}, field_index, field_representation, field_type);
+ kMutable, MapList{map}, field_index, field_representation, field_type);
return true;
}
return false;
@@ -551,9 +567,10 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
}
}
dependencies()->AssumeMapNotDeprecated(transition_map);
+ // Transitioning stores are never stores to constant fields.
*access_info = PropertyAccessInfo::DataField(
- MapList{map}, field_index, field_representation, field_type, field_map,
- holder, transition_map);
+ kMutable, MapList{map}, field_index, field_representation, field_type,
+ field_map, holder, transition_map);
return true;
}
return false;
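Every DataField construction site now states its constness up front. For reference, the transition-store call shape from LookupTransition above, repeated as a standalone example (variable names as in that function):

// Transitioning stores are explicitly mutable under constant field tracking.
PropertyAccessInfo info = PropertyAccessInfo::DataField(
    kMutable, MapList{map}, field_index, field_representation, field_type,
    field_map, holder, transition_map);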
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index e301ad9890..42fa1db1ad 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -62,6 +62,7 @@ class PropertyAccessInfo final {
kNotFound,
kDataConstant,
kDataField,
+ kDataConstantField,
kAccessorConstant,
kGeneric
};
@@ -72,9 +73,9 @@ class PropertyAccessInfo final {
Handle<Object> constant,
MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
- MapList const& receiver_maps, FieldIndex field_index,
- MachineRepresentation field_representation, Type* field_type,
- MaybeHandle<Map> field_map = MaybeHandle<Map>(),
+ PropertyConstness constness, MapList const& receiver_maps,
+ FieldIndex field_index, MachineRepresentation field_representation,
+ Type* field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
@@ -89,6 +90,9 @@ class PropertyAccessInfo final {
bool IsNotFound() const { return kind() == kNotFound; }
bool IsDataConstant() const { return kind() == kDataConstant; }
bool IsDataField() const { return kind() == kDataField; }
+ // TODO(ishell): rename to IsDataConstant() once constant field tracking
+ // is done.
+ bool IsDataConstantField() const { return kind() == kDataConstantField; }
bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
bool IsGeneric() const { return kind() == kGeneric; }
@@ -111,7 +115,7 @@ class PropertyAccessInfo final {
MapList const& receiver_maps);
PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
Handle<Object> constant, MapList const& receiver_maps);
- PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map, FieldIndex field_index,
MachineRepresentation field_representation,
Type* field_type, MaybeHandle<Map> field_map,
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index a721f6a3be..82039c8d2e 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -474,7 +474,8 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
// Check if current frame is an arguments adaptor frame.
__ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(scratch1,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &done);
// Load arguments count from current arguments adaptor frame (note, it
@@ -737,10 +738,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- Deoptimizer::BailoutType bailout_type =
- Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result = AssembleDeoptimizerCall(
- deopt_state_id, bailout_type, current_source_position_);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1545,12 +1544,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmFloat32x4Eq: {
+ case kArmFloat32x4Equal: {
__ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmFloat32x4Ne: {
+ case kArmFloat32x4NotEqual: {
Simd128Register dst = i.OutputSimd128Register();
__ vceq(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ vmvn(dst, dst);
@@ -1578,6 +1577,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vcvt_u32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kArmInt32x4Neg: {
+ __ vneg(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmInt32x4ShiftLeftByScalar: {
+ __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ break;
+ }
+ case kArmInt32x4ShiftRightByScalar: {
+ __ vshr(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ break;
+ }
case kArmInt32x4Add: {
__ vadd(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1588,23 +1601,337 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmInt32x4Eq: {
+ case kArmInt32x4Mul: {
+ __ vmul(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt32x4Min: {
+ __ vmin(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt32x4Max: {
+ __ vmax(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt32x4Equal: {
__ vceq(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmInt32x4Ne: {
+ case kArmInt32x4NotEqual: {
Simd128Register dst = i.OutputSimd128Register();
__ vceq(Neon32, dst, i.InputSimd128Register(0),
i.InputSimd128Register(1));
__ vmvn(dst, dst);
break;
}
- case kArmSimd32x4Select: {
- // Select is a ternary op, so we need to move one input into the
- // destination. Use vtst to canonicalize the 'boolean' input #0.
- __ vtst(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(0));
+ case kArmInt32x4GreaterThan: {
+ __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt32x4GreaterThanOrEqual: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vcge(NeonS32, dst, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint32x4ShiftRightByScalar: {
+ __ vshr(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ break;
+ }
+ case kArmUint32x4Min: {
+ __ vmin(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint32x4Max: {
+ __ vmax(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint32x4GreaterThan: {
+ __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint32x4GreaterThanOrEqual: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vcge(NeonU32, dst, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt16x8Splat: {
+ __ vdup(Neon16, i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kArmInt16x8ExtractLane: {
+ __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS16,
+ i.InputInt8(1));
+ break;
+ }
+ case kArmInt16x8ReplaceLane: {
+ __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(2), NeonS16, i.InputInt8(1));
+ break;
+ }
+ case kArmInt16x8Neg: {
+ __ vneg(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmInt16x8ShiftLeftByScalar: {
+ __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kArmInt16x8ShiftRightByScalar: {
+ __ vshr(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kArmInt16x8Add: {
+ __ vadd(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt16x8AddSaturate: {
+ __ vqadd(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt16x8Sub: {
+ __ vsub(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt16x8SubSaturate: {
+ __ vqsub(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt16x8Mul: {
+ __ vmul(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt16x8Min: {
+ __ vmin(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt16x8Max: {
+ __ vmax(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt16x8Equal: {
+ __ vceq(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt16x8NotEqual: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vceq(Neon16, dst, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vmvn(dst, dst);
+ break;
+ }
+ case kArmInt16x8GreaterThan: {
+ __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt16x8GreaterThanOrEqual: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vcge(NeonS16, dst, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint16x8ShiftRightByScalar: {
+ __ vshr(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kArmUint16x8AddSaturate: {
+ __ vqadd(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint16x8SubSaturate: {
+ __ vqsub(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint16x8Min: {
+ __ vmin(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint16x8Max: {
+ __ vmax(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint16x8GreaterThan: {
+ __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint16x8GreaterThanOrEqual: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vcge(NeonU16, dst, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt8x16Splat: {
+ __ vdup(Neon8, i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kArmInt8x16ExtractLane: {
+ __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS8,
+ i.InputInt8(1));
+ break;
+ }
+ case kArmInt8x16ReplaceLane: {
+ __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(2), NeonS8, i.InputInt8(1));
+ break;
+ }
+ case kArmInt8x16Neg: {
+ __ vneg(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmInt8x16ShiftLeftByScalar: {
+ __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kArmInt8x16ShiftRightByScalar: {
+ __ vshr(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kArmInt8x16Add: {
+ __ vadd(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt8x16AddSaturate: {
+ __ vqadd(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt8x16Sub: {
+ __ vsub(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt8x16SubSaturate: {
+ __ vqsub(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt8x16Mul: {
+ __ vmul(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt8x16Min: {
+ __ vmin(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt8x16Max: {
+ __ vmax(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt8x16Equal: {
+ __ vceq(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt8x16NotEqual: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vceq(Neon8, dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ vmvn(dst, dst);
+ break;
+ }
+ case kArmInt8x16GreaterThan: {
+ __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt8x16GreaterThanOrEqual: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vcge(NeonS8, dst, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint8x16ShiftRightByScalar: {
+ __ vshr(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kArmUint8x16AddSaturate: {
+ __ vqadd(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint8x16SubSaturate: {
+ __ vqsub(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint8x16Min: {
+ __ vmin(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint8x16Max: {
+ __ vmax(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint8x16GreaterThan: {
+ __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmUint8x16GreaterThanOrEqual: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vcge(NeonU8, dst, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmSimd128And: {
+ __ vand(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmSimd128Or: {
+ __ vorr(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmSimd128Xor: {
+ __ veor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmSimd128Not: {
+ __ vmvn(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmSimd32x4Select:
+ case kArmSimd16x8Select:
+ case kArmSimd8x16Select: {
+ // vbsl clobbers the mask input, so make sure it was DefineSameAsFirst.
+ DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
__ vbsl(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(2));
break;
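// Lanewise semantics of the vbsl above, with the mask pre-loaded into dst
// (which is why the DCHECK insists on DefineSameAsFirst):
//   dst = (dst & input1) | (~dst & input2)
// Lanes whose mask bits are set take input1; cleared lanes take input2.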
@@ -1708,8 +2035,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
void Generate() final {
ArmOperandConverter i(gen_, instr_);
- Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
- i.InputInt32(instr_->InputCount() - 1));
+ Builtins::Name trap_id =
+ static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
bool old_has_frame = __ has_frame();
if (frame_elided_) {
__ set_has_frame(true);
@@ -1719,14 +2046,11 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
if (frame_elided_) {
__ set_has_frame(old_has_frame);
}
- if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
- }
}
private:
- void GenerateCallToTrap(Runtime::FunctionId trap_id) {
- if (trap_id == Runtime::kNumFunctions) {
+ void GenerateCallToTrap(Builtins::Name trap_id) {
+ if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
@@ -1735,15 +2059,20 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
} else {
- __ Move(cp, isolate()->native_context());
gen_->AssembleSourcePosition(instr_);
- __ CallRuntime(trap_id);
+ __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ RelocInfo::CODE_TARGET);
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
}
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
}
bool frame_elided_;
@@ -1799,16 +2128,19 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type,
- SourcePosition pos) {
+ int deoptimization_id, SourcePosition pos) {
+ DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ Deoptimizer::BailoutType bailout_type =
+ deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+ : Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
__ CheckConstPool(false, false);
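The bailout-type plumbing above boils down to a two-way mapping. A hedged restatement as a named helper (V8 keeps this inline in AssembleDeoptimizerCall rather than factoring it out):

// Illustrative: soft deopts get their own entry table; everything else
// is treated as an eager bailout on this port.
Deoptimizer::BailoutType BailoutTypeFor(DeoptimizeKind kind) {
  return kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
                                       : Deoptimizer::EAGER;
}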
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index 6e5426c255..0c19debad7 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -129,18 +129,85 @@ namespace compiler {
V(ArmFloat32x4Neg) \
V(ArmFloat32x4Add) \
V(ArmFloat32x4Sub) \
- V(ArmFloat32x4Eq) \
- V(ArmFloat32x4Ne) \
+ V(ArmFloat32x4Equal) \
+ V(ArmFloat32x4NotEqual) \
V(ArmInt32x4Splat) \
V(ArmInt32x4ExtractLane) \
V(ArmInt32x4ReplaceLane) \
V(ArmInt32x4FromFloat32x4) \
V(ArmUint32x4FromFloat32x4) \
+ V(ArmInt32x4Neg) \
+ V(ArmInt32x4ShiftLeftByScalar) \
+ V(ArmInt32x4ShiftRightByScalar) \
V(ArmInt32x4Add) \
V(ArmInt32x4Sub) \
- V(ArmInt32x4Eq) \
- V(ArmInt32x4Ne) \
- V(ArmSimd32x4Select)
+ V(ArmInt32x4Mul) \
+ V(ArmInt32x4Min) \
+ V(ArmInt32x4Max) \
+ V(ArmInt32x4Equal) \
+ V(ArmInt32x4NotEqual) \
+ V(ArmInt32x4GreaterThan) \
+ V(ArmInt32x4GreaterThanOrEqual) \
+ V(ArmUint32x4ShiftRightByScalar) \
+ V(ArmUint32x4Min) \
+ V(ArmUint32x4Max) \
+ V(ArmUint32x4GreaterThan) \
+ V(ArmUint32x4GreaterThanOrEqual) \
+ V(ArmInt16x8Splat) \
+ V(ArmInt16x8ExtractLane) \
+ V(ArmInt16x8ReplaceLane) \
+ V(ArmInt16x8Neg) \
+ V(ArmInt16x8ShiftLeftByScalar) \
+ V(ArmInt16x8ShiftRightByScalar) \
+ V(ArmInt16x8Add) \
+ V(ArmInt16x8AddSaturate) \
+ V(ArmInt16x8Sub) \
+ V(ArmInt16x8SubSaturate) \
+ V(ArmInt16x8Mul) \
+ V(ArmInt16x8Min) \
+ V(ArmInt16x8Max) \
+ V(ArmInt16x8Equal) \
+ V(ArmInt16x8NotEqual) \
+ V(ArmInt16x8GreaterThan) \
+ V(ArmInt16x8GreaterThanOrEqual) \
+ V(ArmUint16x8ShiftRightByScalar) \
+ V(ArmUint16x8AddSaturate) \
+ V(ArmUint16x8SubSaturate) \
+ V(ArmUint16x8Min) \
+ V(ArmUint16x8Max) \
+ V(ArmUint16x8GreaterThan) \
+ V(ArmUint16x8GreaterThanOrEqual) \
+ V(ArmInt8x16Splat) \
+ V(ArmInt8x16ExtractLane) \
+ V(ArmInt8x16ReplaceLane) \
+ V(ArmInt8x16Neg) \
+ V(ArmInt8x16ShiftLeftByScalar) \
+ V(ArmInt8x16ShiftRightByScalar) \
+ V(ArmInt8x16Add) \
+ V(ArmInt8x16AddSaturate) \
+ V(ArmInt8x16Sub) \
+ V(ArmInt8x16SubSaturate) \
+ V(ArmInt8x16Mul) \
+ V(ArmInt8x16Min) \
+ V(ArmInt8x16Max) \
+ V(ArmInt8x16Equal) \
+ V(ArmInt8x16NotEqual) \
+ V(ArmInt8x16GreaterThan) \
+ V(ArmInt8x16GreaterThanOrEqual) \
+ V(ArmUint8x16ShiftRightByScalar) \
+ V(ArmUint8x16AddSaturate) \
+ V(ArmUint8x16SubSaturate) \
+ V(ArmUint8x16Min) \
+ V(ArmUint8x16Max) \
+ V(ArmUint8x16GreaterThan) \
+ V(ArmUint8x16GreaterThanOrEqual) \
+ V(ArmSimd128And) \
+ V(ArmSimd128Or) \
+ V(ArmSimd128Xor) \
+ V(ArmSimd128Not) \
+ V(ArmSimd32x4Select) \
+ V(ArmSimd16x8Select) \
+ V(ArmSimd8x16Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
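The V(...) list above is an X-macro shared by the opcode enum, the mnemonic strings, and the scheduler switch in the next file. A hedged sketch of one expansion site; the list macro in this header is TARGET_ARCH_OPCODE_LIST, but the enum name and sentinel below are invented for illustration:

// Illustrative expansion of the opcode list into enum values.
#define DECLARE_OPCODE(Name) k##Name,
enum ArchOpcodeSketch { TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE) kLastOpcode };
#undef DECLARE_OPCODE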
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index 8dfa68a2f6..ba2f21943a 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -117,18 +117,85 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmFloat32x4Neg:
case kArmFloat32x4Add:
case kArmFloat32x4Sub:
- case kArmFloat32x4Eq:
- case kArmFloat32x4Ne:
+ case kArmFloat32x4Equal:
+ case kArmFloat32x4NotEqual:
case kArmInt32x4Splat:
case kArmInt32x4ExtractLane:
case kArmInt32x4ReplaceLane:
case kArmInt32x4FromFloat32x4:
case kArmUint32x4FromFloat32x4:
+ case kArmInt32x4Neg:
+ case kArmInt32x4ShiftLeftByScalar:
+ case kArmInt32x4ShiftRightByScalar:
case kArmInt32x4Add:
case kArmInt32x4Sub:
- case kArmInt32x4Eq:
- case kArmInt32x4Ne:
+ case kArmInt32x4Mul:
+ case kArmInt32x4Min:
+ case kArmInt32x4Max:
+ case kArmInt32x4Equal:
+ case kArmInt32x4NotEqual:
+ case kArmInt32x4GreaterThan:
+ case kArmInt32x4GreaterThanOrEqual:
+ case kArmUint32x4ShiftRightByScalar:
+ case kArmUint32x4Min:
+ case kArmUint32x4Max:
+ case kArmUint32x4GreaterThan:
+ case kArmUint32x4GreaterThanOrEqual:
+ case kArmInt16x8Splat:
+ case kArmInt16x8ExtractLane:
+ case kArmInt16x8ReplaceLane:
+ case kArmInt16x8Neg:
+ case kArmInt16x8ShiftLeftByScalar:
+ case kArmInt16x8ShiftRightByScalar:
+ case kArmInt16x8Add:
+ case kArmInt16x8AddSaturate:
+ case kArmInt16x8Sub:
+ case kArmInt16x8SubSaturate:
+ case kArmInt16x8Mul:
+ case kArmInt16x8Min:
+ case kArmInt16x8Max:
+ case kArmInt16x8Equal:
+ case kArmInt16x8NotEqual:
+ case kArmInt16x8GreaterThan:
+ case kArmInt16x8GreaterThanOrEqual:
+ case kArmUint16x8ShiftRightByScalar:
+ case kArmUint16x8AddSaturate:
+ case kArmUint16x8SubSaturate:
+ case kArmUint16x8Min:
+ case kArmUint16x8Max:
+ case kArmUint16x8GreaterThan:
+ case kArmUint16x8GreaterThanOrEqual:
+ case kArmInt8x16Splat:
+ case kArmInt8x16ExtractLane:
+ case kArmInt8x16ReplaceLane:
+ case kArmInt8x16Neg:
+ case kArmInt8x16ShiftLeftByScalar:
+ case kArmInt8x16ShiftRightByScalar:
+ case kArmInt8x16Add:
+ case kArmInt8x16AddSaturate:
+ case kArmInt8x16Sub:
+ case kArmInt8x16SubSaturate:
+ case kArmInt8x16Mul:
+ case kArmInt8x16Min:
+ case kArmInt8x16Max:
+ case kArmInt8x16Equal:
+ case kArmInt8x16NotEqual:
+ case kArmInt8x16GreaterThan:
+ case kArmInt8x16GreaterThanOrEqual:
+ case kArmUint8x16ShiftRightByScalar:
+ case kArmUint8x16AddSaturate:
+ case kArmUint8x16SubSaturate:
+ case kArmUint8x16Min:
+ case kArmUint8x16Max:
+ case kArmUint8x16GreaterThan:
+ case kArmUint8x16GreaterThanOrEqual:
+ case kArmSimd128And:
+ case kArmSimd128Or:
+ case kArmSimd128Xor:
+ case kArmSimd128Not:
case kArmSimd32x4Select:
+ case kArmSimd16x8Select:
+ case kArmSimd8x16Select:
return kNoOpcodeFlags;
case kArmVldrF32:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index def486af62..0cffff7a1c 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -84,7 +84,6 @@ void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(0)));
}
-
void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
ArmOperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -92,6 +91,29 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(1)));
}
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ ArmOperandGenerator g(selector);
+ // Use DefineSameAsFirst for ternary ops that clobber their first input,
+ // e.g. the NEON vbsl instruction.
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
+
+void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ ArmOperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ ArmOperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
AddressingMode kImmMode, AddressingMode kRegMode>
@@ -266,7 +288,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -406,6 +428,9 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -491,6 +516,9 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -654,6 +682,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -698,6 +729,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -892,7 +926,7 @@ void VisitShift(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -1090,15 +1124,8 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, TryMatchROR);
}
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
- VisitRR(this, kArmClz, node);
-}
-
-
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitWord32ReverseBits(Node* node) {
DCHECK(IsSupported(ARMv7));
VisitRR(this, kArmRbit, node);
@@ -1261,8 +1288,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
InstructionOperand in[] = {temp_operand, result_operand, shift_31};
- selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
result_operand, shift_31);
@@ -1299,12 +1326,6 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
VisitRRR(this, kArmMul, node);
}
-
-void InstructionSelector::VisitInt32MulHigh(Node* node) {
- VisitRRR(this, kArmSmmul, node);
-}
-
-
void InstructionSelector::VisitUint32MulHigh(Node* node) {
ArmOperandGenerator g(this);
InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
@@ -1333,73 +1354,76 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
-
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- VisitRR(this, kArmVcvtF64F32, node);
-}
-
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
- VisitRR(this, kArmVcvtF32S32, node);
-}
-
-
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
- VisitRR(this, kArmVcvtF32U32, node);
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- VisitRR(this, kArmVcvtF64S32, node);
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- VisitRR(this, kArmVcvtF64U32, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
- VisitRR(this, kArmVcvtS32F32, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
- VisitRR(this, kArmVcvtU32F32, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- VisitRR(this, kArmVcvtS32F64, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- VisitRR(this, kArmVcvtU32F64, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
- VisitRR(this, kArmVcvtU32F64, node);
-}
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- VisitRR(this, kArmVcvtF32F64, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
- VisitRR(this, kArchTruncateDoubleToI, node);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
- VisitRR(this, kArmVcvtS32F64, node);
-}
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
- VisitRR(this, kArmVmovU32F32, node);
-}
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
- VisitRR(this, kArmVmovF32U32, node);
-}
+#define RR_OP_LIST(V) \
+ V(Word32Clz, kArmClz) \
+ V(ChangeFloat32ToFloat64, kArmVcvtF64F32) \
+ V(RoundInt32ToFloat32, kArmVcvtF32S32) \
+ V(RoundUint32ToFloat32, kArmVcvtF32U32) \
+ V(ChangeInt32ToFloat64, kArmVcvtF64S32) \
+ V(ChangeUint32ToFloat64, kArmVcvtF64U32) \
+ V(TruncateFloat32ToInt32, kArmVcvtS32F32) \
+ V(TruncateFloat32ToUint32, kArmVcvtU32F32) \
+ V(ChangeFloat64ToInt32, kArmVcvtS32F64) \
+ V(ChangeFloat64ToUint32, kArmVcvtU32F64) \
+ V(TruncateFloat64ToUint32, kArmVcvtU32F64) \
+ V(TruncateFloat64ToFloat32, kArmVcvtF32F64) \
+ V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
+ V(RoundFloat64ToInt32, kArmVcvtS32F64) \
+ V(BitcastFloat32ToInt32, kArmVmovU32F32) \
+ V(BitcastInt32ToFloat32, kArmVmovF32U32) \
+ V(Float64ExtractLowWord32, kArmVmovLowU32F64) \
+ V(Float64ExtractHighWord32, kArmVmovHighU32F64) \
+ V(Float64SilenceNaN, kArmFloat64SilenceNaN) \
+ V(Float32Abs, kArmVabsF32) \
+ V(Float64Abs, kArmVabsF64) \
+ V(Float32Neg, kArmVnegF32) \
+ V(Float64Neg, kArmVnegF64) \
+ V(Float32Sqrt, kArmVsqrtF32) \
+ V(Float64Sqrt, kArmVsqrtF64)
+
+#define RR_OP_LIST_V8(V) \
+ V(Float32RoundDown, kArmVrintmF32) \
+ V(Float64RoundDown, kArmVrintmF64) \
+ V(Float32RoundUp, kArmVrintpF32) \
+ V(Float64RoundUp, kArmVrintpF64) \
+ V(Float32RoundTruncate, kArmVrintzF32) \
+ V(Float64RoundTruncate, kArmVrintzF64) \
+ V(Float64RoundTiesAway, kArmVrintaF64) \
+ V(Float32RoundTiesEven, kArmVrintnF32) \
+ V(Float64RoundTiesEven, kArmVrintnF64)
+
+#define RRR_OP_LIST(V) \
+ V(Int32MulHigh, kArmSmmul) \
+ V(Float32Mul, kArmVmulF32) \
+ V(Float64Mul, kArmVmulF64) \
+ V(Float32Div, kArmVdivF32) \
+ V(Float64Div, kArmVdivF64) \
+ V(Float32Max, kArmFloat32Max) \
+ V(Float64Max, kArmFloat64Max) \
+ V(Float32Min, kArmFloat32Min) \
+ V(Float64Min, kArmFloat64Min)
+
+#define RR_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, opcode, node); \
+ }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
+
+#define RR_VISITOR_V8(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ DCHECK(CpuFeatures::IsSupported(ARMv8)); \
+ VisitRR(this, opcode, node); \
+ }
+RR_OP_LIST_V8(RR_VISITOR_V8)
+#undef RR_VISITOR_V8
+
+#define RRR_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, opcode, node); \
+ }
+RRR_OP_LIST(RRR_VISITOR)
+#undef RRR_VISITOR
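// For concreteness, V(Word32Clz, kArmClz) in RR_OP_LIST expands through
// RR_VISITOR to exactly the visitor the deleted hand-written code above
// used to spell out:
//   void InstructionSelector::VisitWord32Clz(Node* node) {
//     VisitRR(this, kArmClz, node);
//   }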
void InstructionSelector::VisitFloat32Add(Node* node) {
ArmOperandGenerator g(this);
@@ -1468,132 +1492,12 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
VisitRRR(this, kArmVsubF64, node);
}
-void InstructionSelector::VisitFloat32Mul(Node* node) {
- VisitRRR(this, kArmVmulF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
- VisitRRR(this, kArmVmulF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
- VisitRRR(this, kArmVdivF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
- VisitRRR(this, kArmVdivF64, node);
-}
-
-
void InstructionSelector::VisitFloat64Mod(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
-void InstructionSelector::VisitFloat32Max(Node* node) {
- VisitRRR(this, kArmFloat32Max, node);
-}
-
-void InstructionSelector::VisitFloat64Max(Node* node) {
- VisitRRR(this, kArmFloat64Max, node);
-}
-
-void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
- VisitRR(this, kArmFloat64SilenceNaN, node);
-}
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
- VisitRRR(this, kArmFloat32Min, node);
-}
-
-void InstructionSelector::VisitFloat64Min(Node* node) {
- VisitRRR(this, kArmFloat64Min, node);
-}
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
- VisitRR(this, kArmVabsF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
- VisitRR(this, kArmVabsF64, node);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- VisitRR(this, kArmVsqrtF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- VisitRR(this, kArmVsqrtF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRR(this, kArmVrintmF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRR(this, kArmVrintmF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRR(this, kArmVrintpF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRR(this, kArmVrintpF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRR(this, kArmVrintzF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRR(this, kArmVrintzF64, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRR(this, kArmVrintaF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRR(this, kArmVrintnF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRR(this, kArmVrintnF64, node);
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
- VisitRR(this, kArmVnegF32, node);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
- VisitRR(this, kArmVnegF64, node);
-}
-
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
InstructionCode opcode) {
ArmOperandGenerator g(this);
@@ -1656,8 +1560,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1853,7 +1757,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -2012,7 +1916,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
value_operand);
@@ -2032,14 +1936,16 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
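// DeoptimizeParametersOf(op) bundles what used to be a bare reason: the
// continuation now also carries a DeoptimizeKind (eager vs. soft), which
// AssembleDeoptimizerCall consumes when choosing the bailout entry table.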
@@ -2188,17 +2094,6 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
VisitFloat64Compare(this, node, &cont);
}
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
- VisitRR(this, kArmVmovLowU32F64, node);
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
- VisitRR(this, kArmVmovHighU32F64, node);
-}
-
-
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
ArmOperandGenerator g(this);
Node* left = node->InputAt(0);
@@ -2286,136 +2181,144 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs);
}
-void InstructionSelector::VisitCreateFloat32x4(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmFloat32x4Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat32x4ExtractLane(Node* node) {
- ArmOperandGenerator g(this);
- int32_t lane = OpParameter<int32_t>(node);
- Emit(kArmFloat32x4ExtractLane, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
-}
-
-void InstructionSelector::VisitFloat32x4ReplaceLane(Node* node) {
- ArmOperandGenerator g(this);
- int32_t lane = OpParameter<int32_t>(node);
- Emit(kArmFloat32x4ReplaceLane, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
- g.Use(node->InputAt(1)));
-}
-
-void InstructionSelector::VisitFloat32x4FromInt32x4(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmFloat32x4FromInt32x4, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat32x4FromUint32x4(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmFloat32x4FromUint32x4, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat32x4Abs(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmFloat32x4Abs, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat32x4Neg(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmFloat32x4Neg, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat32x4Add(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmFloat32x4Add, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
-
-void InstructionSelector::VisitFloat32x4Sub(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmFloat32x4Sub, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
-
-void InstructionSelector::VisitFloat32x4Equal(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmFloat32x4Eq, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
-
-void InstructionSelector::VisitFloat32x4NotEqual(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmFloat32x4Ne, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
-
-void InstructionSelector::VisitCreateInt32x4(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmInt32x4Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
- ArmOperandGenerator g(this);
- int32_t lane = OpParameter<int32_t>(node);
- Emit(kArmInt32x4ExtractLane, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
-}
-
-void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
- ArmOperandGenerator g(this);
- int32_t lane = OpParameter<int32_t>(node);
- Emit(kArmInt32x4ReplaceLane, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
- g.Use(node->InputAt(1)));
-}
-
-void InstructionSelector::VisitInt32x4FromFloat32x4(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmInt32x4FromFloat32x4, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmUint32x4FromFloat32x4, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitInt32x4Add(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmInt32x4Add, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
-
-void InstructionSelector::VisitInt32x4Sub(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmInt32x4Sub, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
-
-void InstructionSelector::VisitInt32x4Equal(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmInt32x4Eq, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
-}
-
-void InstructionSelector::VisitInt32x4NotEqual(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmInt32x4Ne, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
-}
-
-void InstructionSelector::VisitSimd32x4Select(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmSimd32x4Select, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)));
-}
+#define SIMD_TYPE_LIST(V) \
+ V(Float32x4) \
+ V(Int32x4) \
+ V(Int16x8) \
+ V(Int8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+ V(32x4) \
+ V(16x8) \
+ V(8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(Float32x4FromInt32x4) \
+ V(Float32x4FromUint32x4) \
+ V(Float32x4Abs) \
+ V(Float32x4Neg) \
+ V(Int32x4FromFloat32x4) \
+ V(Uint32x4FromFloat32x4) \
+ V(Int32x4Neg) \
+ V(Int16x8Neg) \
+ V(Int8x16Neg) \
+ V(Simd128Not)
+
+#define SIMD_BINOP_LIST(V) \
+ V(Float32x4Add) \
+ V(Float32x4Sub) \
+ V(Float32x4Equal) \
+ V(Float32x4NotEqual) \
+ V(Int32x4Add) \
+ V(Int32x4Sub) \
+ V(Int32x4Mul) \
+ V(Int32x4Min) \
+ V(Int32x4Max) \
+ V(Int32x4Equal) \
+ V(Int32x4NotEqual) \
+ V(Int32x4GreaterThan) \
+ V(Int32x4GreaterThanOrEqual) \
+ V(Uint32x4Min) \
+ V(Uint32x4Max) \
+ V(Uint32x4GreaterThan) \
+ V(Uint32x4GreaterThanOrEqual) \
+ V(Int16x8Add) \
+ V(Int16x8AddSaturate) \
+ V(Int16x8Sub) \
+ V(Int16x8SubSaturate) \
+ V(Int16x8Mul) \
+ V(Int16x8Min) \
+ V(Int16x8Max) \
+ V(Int16x8Equal) \
+ V(Int16x8NotEqual) \
+ V(Int16x8GreaterThan) \
+ V(Int16x8GreaterThanOrEqual) \
+ V(Uint16x8AddSaturate) \
+ V(Uint16x8SubSaturate) \
+ V(Uint16x8Min) \
+ V(Uint16x8Max) \
+ V(Uint16x8GreaterThan) \
+ V(Uint16x8GreaterThanOrEqual) \
+ V(Int8x16Add) \
+ V(Int8x16AddSaturate) \
+ V(Int8x16Sub) \
+ V(Int8x16SubSaturate) \
+ V(Int8x16Mul) \
+ V(Int8x16Min) \
+ V(Int8x16Max) \
+ V(Int8x16Equal) \
+ V(Int8x16NotEqual) \
+ V(Int8x16GreaterThan) \
+ V(Int8x16GreaterThanOrEqual) \
+ V(Uint8x16AddSaturate) \
+ V(Uint8x16SubSaturate) \
+ V(Uint8x16Min) \
+ V(Uint8x16Max) \
+ V(Uint8x16GreaterThan) \
+ V(Uint8x16GreaterThanOrEqual) \
+ V(Simd128And) \
+ V(Simd128Or) \
+ V(Simd128Xor)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(Int32x4ShiftLeftByScalar) \
+ V(Int32x4ShiftRightByScalar) \
+ V(Uint32x4ShiftRightByScalar) \
+ V(Int16x8ShiftLeftByScalar) \
+ V(Int16x8ShiftRightByScalar) \
+ V(Uint16x8ShiftRightByScalar) \
+ V(Int8x16ShiftLeftByScalar) \
+ V(Int8x16ShiftRightByScalar) \
+ V(Uint8x16ShiftRightByScalar)
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::VisitCreate##Type(Node* node) { \
+ VisitRR(this, kArm##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type) \
+ void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+ VisitRRI(this, kArm##Type##ExtractLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kArm##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, kArm##Name, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_BINOP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, kArm##Name, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRI(this, kArm##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_SELECT_OP(format) \
+ void InstructionSelector::VisitSimd##format##Select(Node* node) { \
+ VisitRRRR(this, kArmSimd##format##Select, node); \
+ }
+SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
+#undef SIMD_VISIT_SELECT_OP
// static
MachineOperatorBuilder::Flags
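The hunk above collapses a couple dozen hand-written ARM SIMD visitors into X-macro lists: each list entry expands through the SIMD_VISIT_* macros into one InstructionSelector::Visit* definition, so adding an op becomes a one-line list change instead of a new function. A minimal self-contained sketch of the same X-macro pattern (names are illustrative, not V8's):

    #include <cstdio>

    // Each list entry becomes one generated function; adding an op is a
    // one-line change to the list rather than a new hand-written visitor.
    #define UNOP_LIST(V) \
      V(Neg)             \
      V(Abs)             \
      V(Sqrt)

    #define DEFINE_VISITOR(Name)                            \
      void Visit##Name(int node) {                          \
        std::printf("emit k%s for node %d\n", #Name, node); \
      }
    UNOP_LIST(DEFINE_VISITOR)
    #undef DEFINE_VISITOR

    int main() {
      VisitNeg(1);  // expands from the list entry V(Neg)
      VisitAbs(2);
      VisitSqrt(3);
    }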
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 09fe0eb718..1cdedb0f9e 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -570,7 +570,8 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
// Check if current frame is an arguments adaptor frame.
__ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Cmp(scratch1,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ B(ne, &done);
// Load arguments count from current arguments adaptor frame (note, it
@@ -774,10 +775,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- Deoptimizer::BailoutType bailout_type =
- Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result = AssembleDeoptimizerCall(
- deopt_state_id, bailout_type, current_source_position_);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1712,8 +1711,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_(gen) {}
void Generate() final {
Arm64OperandConverter i(gen_, instr_);
- Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
- i.InputInt32(instr_->InputCount() - 1));
+ Builtins::Name trap_id =
+ static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
bool old_has_frame = __ has_frame();
if (frame_elided_) {
__ set_has_frame(true);
@@ -1723,32 +1722,34 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
if (frame_elided_) {
__ set_has_frame(old_has_frame);
}
- if (FLAG_debug_code) {
- // The trap code should never return.
- __ Brk(0);
- }
}
private:
- void GenerateCallToTrap(Runtime::FunctionId trap_id) {
- if (trap_id == Runtime::kNumFunctions) {
+ void GenerateCallToTrap(Builtins::Name trap_id) {
+ if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
} else {
DCHECK(csp.Is(__ StackPointer()));
- __ Move(cp, isolate()->native_context());
// Initialize the jssp because it is required for the runtime call.
__ Mov(jssp, csp);
gen_->AssembleSourcePosition(instr_);
- __ CallRuntime(trap_id);
+ __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ RelocInfo::CODE_TARGET);
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ // The trap code should never return.
+ __ Brk(0);
+ }
}
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
}
bool frame_elided_;
Instruction* instr_;
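In GenerateCallToTrap above, the one-past-the-end id Builtins::builtin_count doubles as a sentinel meaning "no trap builtin, call the C test hook", so cctest can exercise traps without the runtime; note the safepoint is now recorded only on the builtin path, since the C path leaves the frame and returns directly. The sentinel dispatch, restated with a stand-in constant:

    #include <cassert>

    enum { kBuiltinCount = 3 };  // stand-in for Builtins::builtin_count

    // The one-past-the-end builtin id doubles as "no builtin; take the
    // C-call testing path instead of calling a trap builtin".
    bool UseCTestTrapHandler(int trap_id) { return trap_id == kBuiltinCount; }

    int main() {
      assert(UseCTestTrapHandler(kBuiltinCount));  // cctest path
      assert(!UseCTestTrapHandler(0));             // real trap builtin
    }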
@@ -1807,13 +1808,16 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type,
- SourcePosition pos) {
+ int deoptimization_id, SourcePosition pos) {
+ DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ Deoptimizer::BailoutType bailout_type =
+ deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+ : Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
@@ -1886,7 +1890,6 @@ void CodeGenerator::AssembleConstructFrame() {
osr_pc_offset_ = __ pc_offset();
shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
-
// Build remainder of frame, including accounting for and filling-in
// frame-specific header information, e.g. claiming the extra slot that
// other platforms explicitly push for STUB frames and frames recording
@@ -1901,7 +1904,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (is_stub_frame) {
UseScratchRegisterScope temps(masm());
Register temp = temps.AcquireX();
- __ Mov(temp, Smi::FromInt(info()->GetOutputStackFrameType()));
+ __ Mov(temp, StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
__ Str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
}
}
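AssembleDeoptimizerCall no longer receives a Deoptimizer::BailoutType parameter; it derives one from the DeoptimizeKind recorded with the deopt id, exactly the two-way mapping visible in the hunk above. Restated as a standalone helper (enum values abbreviated from the diff):

    #include <cassert>

    enum class DeoptimizeKind { kEager, kSoft };
    enum BailoutType { EAGER, SOFT };  // the Deoptimizer's view

    // Soft deopts select the SOFT entry table; everything else is EAGER,
    // matching the conditional in AssembleDeoptimizerCall.
    BailoutType BailoutTypeFor(DeoptimizeKind kind) {
      return kind == DeoptimizeKind::kSoft ? SOFT : EAGER;
    }

    int main() {
      assert(BailoutTypeFor(DeoptimizeKind::kSoft) == SOFT);
      assert(BailoutTypeFor(DeoptimizeKind::kEager) == EAGER);
    }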
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 9cb33f6c44..bacf7921b7 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -123,7 +123,7 @@ class Arm64OperandGenerator final : public OperandGenerator {
bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
// TODO(arm64): Load and Store on 128 bit Q registers is not supported yet.
- DCHECK_NE(MachineRepresentation::kSimd128, rep);
+ DCHECK_GT(MachineRepresentation::kSimd128, rep);
return IsIntegerConstant(node) &&
(GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
}
@@ -436,14 +436,18 @@ void VisitBinop(InstructionSelector* selector, Node* node,
Matcher m_shift(right_node);
inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
- inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+ // We only need at most the last 6 bits of the shift.
+ inputs[input_count++] =
+ g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
} else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
!is_add_sub)) {
if (must_commute_cond) cont->Commute();
Matcher m_shift(left_node);
inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
- inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+ // We only need at most the last 6 bits of the shift.
+ inputs[input_count++] =
+ g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
} else {
inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(right_node);
@@ -470,7 +474,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
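The shift-matching change above stops passing the raw constant node and instead masks the matched value to its low 6 bits, the most an ARM64 shifted-operand encoding can carry for 64-bit data. A standalone sketch of the masking (not the V8 API):

    #include <cassert>
    #include <cstdint>

    // ARM64 shifted operands encode at most a 6-bit shift amount, so any
    // matched constant is reduced modulo 64 before becoming an immediate.
    int EncodeShiftImmediate(int64_t matched_constant) {
      return static_cast<int>(matched_constant & 0x3F);
    }

    int main() {
      assert(EncodeShiftImmediate(65) == 1);   // 65 & 0x3F == 1
      assert(EncodeShiftImmediate(63) == 63);  // already in range
    }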
@@ -589,6 +593,9 @@ void InstructionSelector::VisitLoad(Node* node) {
immediate_mode = kLoadStoreImm64;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -685,6 +692,9 @@ void InstructionSelector::VisitStore(Node* node) {
immediate_mode = kLoadStoreImm64;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -753,6 +763,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -805,6 +818,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -942,7 +958,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
uint64_t mask = m.right().Value();
uint64_t mask_width = base::bits::CountPopulation64(mask);
uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
- if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ if ((mask_width != 0) && (mask_width != 64) &&
+ (mask_msb + mask_width == 64)) {
// The mask must be contiguous, and occupy the least-significant bits.
DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
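The added mask_width != 64 guard in VisitWord64And closes a corner case: an all-ones mask has zero leading zeros and a popcount of 64, so it passed the old mask_msb + mask_width == 64 test even though the And is then a no-op and a 64-bit-wide bitfield extract is not a useful encoding. The predicate, spelled out standalone:

    #include <cassert>
    #include <cstdint>

    int Popcount64(uint64_t v) { int n = 0; while (v) { v &= v - 1; ++n; } return n; }
    int Clz64(uint64_t v) {
      int n = 0;
      for (uint64_t b = 1ull << 63; b && !(v & b); b >>= 1) ++n;
      return n;
    }

    // A mask qualifies for a ubfx-style extract only if it is a contiguous
    // run of ones starting at bit 0, excluding the all-ones mask, which the
    // new guard rejects. (msb + width == 64 with that popcount forces the
    // run to be contiguous and anchored at bit 0.)
    bool IsExtractMask(uint64_t mask) {
      int width = Popcount64(mask);
      int msb = Clz64(mask);
      return width != 0 && width != 64 && msb + width == 64;
    }

    int main() {
      assert(IsExtractMask(0x00000000FFFFFFFFull));   // low 32 bits
      assert(!IsExtractMask(0xFFFFFFFFFFFFFFFFull));  // all ones: And is a no-op
      assert(!IsExtractMask(0x5ull));                 // non-contiguous
    }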
@@ -1227,44 +1244,99 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kArm64Ror, node, kShift64Imm);
}
-
-void InstructionSelector::VisitWord64Clz(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-}
-
+#define RR_OP_LIST(V) \
+ V(Word64Clz, kArm64Clz) \
+ V(Word32Clz, kArm64Clz32) \
+ V(Word32ReverseBits, kArm64Rbit32) \
+ V(Word64ReverseBits, kArm64Rbit) \
+ V(ChangeFloat32ToFloat64, kArm64Float32ToFloat64) \
+ V(RoundInt32ToFloat32, kArm64Int32ToFloat32) \
+ V(RoundUint32ToFloat32, kArm64Uint32ToFloat32) \
+ V(ChangeInt32ToFloat64, kArm64Int32ToFloat64) \
+ V(ChangeUint32ToFloat64, kArm64Uint32ToFloat64) \
+ V(TruncateFloat32ToInt32, kArm64Float32ToInt32) \
+ V(ChangeFloat64ToInt32, kArm64Float64ToInt32) \
+ V(TruncateFloat32ToUint32, kArm64Float32ToUint32) \
+ V(ChangeFloat64ToUint32, kArm64Float64ToUint32) \
+ V(TruncateFloat64ToUint32, kArm64Float64ToUint32) \
+ V(TruncateFloat64ToFloat32, kArm64Float64ToFloat32) \
+ V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
+ V(RoundFloat64ToInt32, kArm64Float64ToInt32) \
+ V(RoundInt64ToFloat32, kArm64Int64ToFloat32) \
+ V(RoundInt64ToFloat64, kArm64Int64ToFloat64) \
+ V(RoundUint64ToFloat32, kArm64Uint64ToFloat32) \
+ V(RoundUint64ToFloat64, kArm64Uint64ToFloat64) \
+ V(BitcastFloat32ToInt32, kArm64Float64ExtractLowWord32) \
+ V(BitcastFloat64ToInt64, kArm64U64MoveFloat64) \
+ V(BitcastInt32ToFloat32, kArm64Float64MoveU64) \
+ V(BitcastInt64ToFloat64, kArm64Float64MoveU64) \
+ V(Float32Abs, kArm64Float32Abs) \
+ V(Float64Abs, kArm64Float64Abs) \
+ V(Float32Sqrt, kArm64Float32Sqrt) \
+ V(Float64Sqrt, kArm64Float64Sqrt) \
+ V(Float32RoundDown, kArm64Float32RoundDown) \
+ V(Float64RoundDown, kArm64Float64RoundDown) \
+ V(Float32RoundUp, kArm64Float32RoundUp) \
+ V(Float64RoundUp, kArm64Float64RoundUp) \
+ V(Float32RoundTruncate, kArm64Float32RoundTruncate) \
+ V(Float64RoundTruncate, kArm64Float64RoundTruncate) \
+ V(Float64RoundTiesAway, kArm64Float64RoundTiesAway) \
+ V(Float32RoundTiesEven, kArm64Float32RoundTiesEven) \
+ V(Float64RoundTiesEven, kArm64Float64RoundTiesEven) \
+ V(Float32Neg, kArm64Float32Neg) \
+ V(Float64Neg, kArm64Float64Neg) \
+ V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
+ V(Float64SilenceNaN, kArm64Float64SilenceNaN)
+
+#define RRR_OP_LIST(V) \
+ V(Int32Div, kArm64Idiv32) \
+ V(Int64Div, kArm64Idiv) \
+ V(Uint32Div, kArm64Udiv32) \
+ V(Uint64Div, kArm64Udiv) \
+ V(Int32Mod, kArm64Imod32) \
+ V(Int64Mod, kArm64Imod) \
+ V(Uint32Mod, kArm64Umod32) \
+ V(Uint64Mod, kArm64Umod) \
+ V(Float32Add, kArm64Float32Add) \
+ V(Float64Add, kArm64Float64Add) \
+ V(Float32Sub, kArm64Float32Sub) \
+ V(Float64Sub, kArm64Float64Sub) \
+ V(Float32Mul, kArm64Float32Mul) \
+ V(Float64Mul, kArm64Float64Mul) \
+ V(Float32Div, kArm64Float32Div) \
+ V(Float64Div, kArm64Float64Div) \
+ V(Float32Max, kArm64Float32Max) \
+ V(Float64Max, kArm64Float64Max) \
+ V(Float32Min, kArm64Float32Min) \
+ V(Float64Min, kArm64Float64Min)
+
+#define RR_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, opcode, node); \
+ }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
+
+#define RRR_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, opcode, node); \
+ }
+RRR_OP_LIST(RRR_VISITOR)
+#undef RRR_VISITOR
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord32ReverseBits(Node* node) {
- VisitRR(this, kArm64Rbit32, node);
-}
-
-
-void InstructionSelector::VisitWord64ReverseBits(Node* node) {
- VisitRR(this, kArm64Rbit, node);
-}
-
void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1386,8 +1458,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
InstructionOperand in[] = {result, result};
- selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
} else {
@@ -1499,94 +1571,6 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
}
-void InstructionSelector::VisitInt32Div(Node* node) {
- VisitRRR(this, kArm64Idiv32, node);
-}
-
-
-void InstructionSelector::VisitInt64Div(Node* node) {
- VisitRRR(this, kArm64Idiv, node);
-}
-
-
-void InstructionSelector::VisitUint32Div(Node* node) {
- VisitRRR(this, kArm64Udiv32, node);
-}
-
-
-void InstructionSelector::VisitUint64Div(Node* node) {
- VisitRRR(this, kArm64Udiv, node);
-}
-
-
-void InstructionSelector::VisitInt32Mod(Node* node) {
- VisitRRR(this, kArm64Imod32, node);
-}
-
-
-void InstructionSelector::VisitInt64Mod(Node* node) {
- VisitRRR(this, kArm64Imod, node);
-}
-
-
-void InstructionSelector::VisitUint32Mod(Node* node) {
- VisitRRR(this, kArm64Umod32, node);
-}
-
-
-void InstructionSelector::VisitUint64Mod(Node* node) {
- VisitRRR(this, kArm64Umod, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- VisitRR(this, kArm64Float32ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
- VisitRR(this, kArm64Int32ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
- VisitRR(this, kArm64Uint32ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- VisitRR(this, kArm64Int32ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- VisitRR(this, kArm64Uint32ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
- VisitRR(this, kArm64Float32ToInt32, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- VisitRR(this, kArm64Float64ToInt32, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
- VisitRR(this, kArm64Float32ToUint32, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- VisitRR(this, kArm64Float64ToUint32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
- VisitRR(this, kArm64Float64ToUint32, node);
-}
-
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Arm64OperandGenerator g(this);
@@ -1741,20 +1725,6 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}
-
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- VisitRR(this, kArm64Float64ToFloat32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
- VisitRR(this, kArchTruncateDoubleToI, node);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
- VisitRR(this, kArm64Float64ToInt32, node);
-}
-
-
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -1763,85 +1733,6 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
}
-
-void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
- VisitRR(this, kArm64Int64ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
- VisitRR(this, kArm64Int64ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
- VisitRR(this, kArm64Uint64ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
- VisitRR(this, kArm64Uint64ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
- VisitRR(this, kArm64Float64ExtractLowWord32, node);
-}
-
-
-void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
- VisitRR(this, kArm64U64MoveFloat64, node);
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
- VisitRR(this, kArm64Float64MoveU64, node);
-}
-
-
-void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
- VisitRR(this, kArm64Float64MoveU64, node);
-}
-
-
-void InstructionSelector::VisitFloat32Add(Node* node) {
- VisitRRR(this, kArm64Float32Add, node);
-}
-
-
-void InstructionSelector::VisitFloat64Add(Node* node) {
- VisitRRR(this, kArm64Float64Add, node);
-}
-
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
- VisitRRR(this, kArm64Float32Sub, node);
-}
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
- VisitRRR(this, kArm64Float64Sub, node);
-}
-
-void InstructionSelector::VisitFloat32Mul(Node* node) {
- VisitRRR(this, kArm64Float32Mul, node);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
- VisitRRR(this, kArm64Float64Mul, node);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
- VisitRRR(this, kArm64Float32Div, node);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
- VisitRRR(this, kArm64Float64Div, node);
-}
-
-
void InstructionSelector::VisitFloat64Mod(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
@@ -1849,94 +1740,6 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
-void InstructionSelector::VisitFloat32Max(Node* node) {
- VisitRRR(this, kArm64Float32Max, node);
-}
-
-void InstructionSelector::VisitFloat64Max(Node* node) {
- VisitRRR(this, kArm64Float64Max, node);
-}
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
- VisitRRR(this, kArm64Float32Min, node);
-}
-
-void InstructionSelector::VisitFloat64Min(Node* node) {
- VisitRRR(this, kArm64Float64Min, node);
-}
-
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
- VisitRR(this, kArm64Float32Abs, node);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
- VisitRR(this, kArm64Float64Abs, node);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- VisitRR(this, kArm64Float32Sqrt, node);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- VisitRR(this, kArm64Float64Sqrt, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
- VisitRR(this, kArm64Float32RoundDown, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
- VisitRR(this, kArm64Float64RoundDown, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
- VisitRR(this, kArm64Float32RoundUp, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
- VisitRR(this, kArm64Float64RoundUp, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
- VisitRR(this, kArm64Float32RoundTruncate, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- VisitRR(this, kArm64Float64RoundTruncate, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
- VisitRR(this, kArm64Float64RoundTiesAway, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
- VisitRR(this, kArm64Float32RoundTiesEven, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
- VisitRR(this, kArm64Float64RoundTiesEven, node);
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
- VisitRR(this, kArm64Float32Neg, node);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
- VisitRR(this, kArm64Float64Neg, node);
-}
-
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
InstructionCode opcode) {
Arm64OperandGenerator g(this);
@@ -2005,8 +1808,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -2177,7 +1980,7 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector,
} else {
DCHECK(cont->IsDeoptimize());
selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
}
}
@@ -2531,7 +2334,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
g.UseRegister(value), g.UseRegister(value),
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsTrap());
selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
@@ -2549,14 +2352,16 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2806,21 +2611,6 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
VisitFloat64Compare(this, node, &cont);
}
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Arm64OperandGenerator g(this);
Node* left = node->InputAt(0);
@@ -2855,10 +2645,6 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
- VisitRR(this, kArm64Float64SilenceNaN, node);
-}
-
void InstructionSelector::VisitAtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index 87e7787156..e199a032a8 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -19,6 +19,7 @@
#include "src/compiler/state-values-utils.h"
#include "src/feedback-vector.h"
#include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
namespace v8 {
namespace internal {
@@ -930,10 +931,11 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals()->push_back(variable->name());
- FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+ FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
globals()->push_back(isolate()->factory()->undefined_value());
+ globals()->push_back(isolate()->factory()->undefined_value());
break;
}
case VariableLocation::PARAMETER:
@@ -966,9 +968,15 @@ void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals()->push_back(variable->name());
- FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+ FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
+
+ // We need the slot where the literals array lives, too.
+ slot = decl->fun()->LiteralFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
+
globals()->push_back(function);
break;
}
@@ -1165,98 +1173,8 @@ void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
- VisitForValue(stmt->subject());
- Node* object = environment()->Pop();
- BlockBuilder for_block(this);
- for_block.BeginBlock();
- // Check for null or undefined before entering loop.
- Node* is_null_cond =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), object,
- jsgraph()->NullConstant());
- for_block.BreakWhen(is_null_cond, BranchHint::kFalse);
- Node* is_undefined_cond =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), object,
- jsgraph()->UndefinedConstant());
- for_block.BreakWhen(is_undefined_cond, BranchHint::kFalse);
- {
- // Convert object to jsobject.
- object = BuildToObject(object, stmt->ToObjectId());
- environment()->Push(object);
-
- // Prepare for-in cache.
- Node* prepare = NewNode(javascript()->ForInPrepare(), object);
- PrepareFrameState(prepare, stmt->PrepareId(),
- OutputFrameStateCombine::Push(3));
- Node* cache_type = NewNode(common()->Projection(0), prepare);
- Node* cache_array = NewNode(common()->Projection(1), prepare);
- Node* cache_length = NewNode(common()->Projection(2), prepare);
-
- // Construct the rest of the environment.
- environment()->Push(cache_type);
- environment()->Push(cache_array);
- environment()->Push(cache_length);
- environment()->Push(jsgraph()->ZeroConstant());
-
- // Build the actual loop body.
- LoopBuilder for_loop(this);
- for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
- {
- // These stack values are renamed in the case of OSR, so reload them
- // from the environment.
- Node* index = environment()->Peek(0);
- Node* cache_length = environment()->Peek(1);
- Node* cache_array = environment()->Peek(2);
- Node* cache_type = environment()->Peek(3);
- Node* object = environment()->Peek(4);
-
- // Check loop termination condition (we know that the {index} is always
- // in Smi range, so we can just set the hint on the comparison below).
- PrepareEagerCheckpoint(stmt->EntryId());
- Node* exit_cond =
- NewNode(javascript()->LessThan(CompareOperationHint::kSignedSmall),
- index, cache_length);
- PrepareFrameState(exit_cond, BailoutId::None());
- for_loop.BreakUnless(exit_cond);
-
- // Compute the next enumerated value.
- Node* value = NewNode(javascript()->ForInNext(), object, cache_array,
- cache_type, index);
- PrepareFrameState(value, stmt->FilterId(),
- OutputFrameStateCombine::Push());
- IfBuilder test_value(this);
- Node* test_value_cond =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), value,
- jsgraph()->UndefinedConstant());
- test_value.If(test_value_cond, BranchHint::kFalse);
- test_value.Then();
- test_value.Else();
- {
- environment()->Push(value);
- PrepareEagerCheckpoint(stmt->FilterId());
- value = environment()->Pop();
- // Bind value and do loop body.
- VectorSlotPair feedback =
- CreateVectorSlotPair(stmt->EachFeedbackSlot());
- VisitForInAssignment(stmt->each(), value, feedback,
- stmt->AssignmentId());
- VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
- }
- test_value.End();
- for_loop.EndBody();
-
- // Increment counter and continue (we know that the {index} is always
- // in Smi range, so we can just set the hint on the increment below).
- index = environment()->Peek(0);
- PrepareEagerCheckpoint(stmt->IncrementId());
- index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall),
- index, jsgraph()->OneConstant());
- PrepareFrameState(index, BailoutId::None());
- environment()->Poke(0, index);
- }
- for_loop.EndLoop();
- environment()->Drop(5);
- }
- for_block.EndBlock();
+ // Only the BytecodeGraphBuilder supports for-in.
+ return SetStackOverflow();
}
@@ -1279,10 +1197,8 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- Node* node =
- NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
- PrepareFrameState(node, stmt->DebugBreakId());
- environment()->MarkAllLocalsLive();
+ // Debugger statement is supported only by going through Ignition first.
+ UNREACHABLE();
}
@@ -1353,7 +1269,8 @@ void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
// Create node to materialize a regular expression literal.
const Operator* op = javascript()->CreateLiteralRegExp(
- expr->pattern(), expr->flags(), expr->literal_index());
+ expr->pattern(), expr->flags(),
+ FeedbackVector::GetIndex(expr->literal_slot()));
Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(expr, literal);
@@ -1366,7 +1283,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Create node to deep-copy the literal boilerplate.
const Operator* op = javascript()->CreateLiteralObject(
expr->GetOrBuildConstantProperties(isolate()), expr->ComputeFlags(true),
- expr->literal_index(), expr->properties_count());
+ FeedbackVector::GetIndex(expr->literal_slot()), expr->properties_count());
Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
@@ -1402,7 +1319,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Name> name = key->AsPropertyName();
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(0));
- Node* store = BuildNamedStore(literal, name, value, feedback);
+ Node* store = BuildNamedStoreOwn(literal, name, value, feedback);
PrepareFrameState(store, key->id(),
OutputFrameStateCombine::Ignore());
BuildSetHomeObject(value, literal, property, 1);
@@ -1495,7 +1412,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// Create node to deep-copy the literal boilerplate.
const Operator* op = javascript()->CreateLiteralArray(
expr->GetOrBuildConstantElements(isolate()), expr->ComputeFlags(true),
- expr->literal_index(), expr->values()->length());
+ FeedbackVector::GetIndex(expr->literal_slot()), expr->values()->length());
Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
@@ -1525,51 +1442,6 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ast_context()->ProduceValue(expr, environment()->Pop());
}
-void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
- const VectorSlotPair& feedback,
- BailoutId bailout_id) {
- DCHECK(expr->IsValidReferenceExpressionOrThis());
-
- // Left-hand side can only be a property, a global or a variable slot.
- Property* property = expr->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
-
- // Evaluate LHS expression and store the value.
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- BuildVariableAssignment(var, value, Token::ASSIGN, feedback, bailout_id);
- break;
- }
- case NAMED_PROPERTY: {
- environment()->Push(value);
- VisitForValue(property->obj());
- Node* object = environment()->Pop();
- value = environment()->Pop();
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback);
- PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
- break;
- }
- case KEYED_PROPERTY: {
- environment()->Push(value);
- VisitForValue(property->obj());
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- Node* object = environment()->Pop();
- value = environment()->Pop();
- Node* store = BuildKeyedStore(object, key, value, feedback);
- PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
- break;
- }
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
-}
-
-
void AstGraphBuilder::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
@@ -1824,8 +1696,8 @@ void AstGraphBuilder::VisitCall(Call* expr) {
float const frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
const Operator* call =
- javascript()->CallFunction(args->length() + 2, frequency, feedback,
- receiver_hint, expr->tail_call_mode());
+ javascript()->Call(args->length() + 2, frequency, feedback, receiver_hint,
+ expr->tail_call_mode());
PrepareEagerCheckpoint(expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
// The callee passed to the call, we just need to push something here to
@@ -1852,7 +1724,7 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
float const frequency = ComputeCallFrequency(expr->CallNewFeedbackSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallNewFeedbackSlot());
const Operator* call =
- javascript()->CallConstruct(args->length() + 2, frequency, feedback);
+ javascript()->Construct(args->length() + 2, frequency, feedback);
Node* value = ProcessArguments(call, args->length() + 2);
PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(expr, value);
@@ -1873,7 +1745,7 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
VisitForValues(args);
// Create node to perform the JS runtime call.
- const Operator* call = javascript()->CallFunction(args->length() + 2);
+ const Operator* call = javascript()->Call(args->length() + 2);
PrepareEagerCheckpoint(expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
@@ -2228,10 +2100,8 @@ void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop,
BailoutId stack_check_id) {
ControlScopeForIteration scope(this, stmt, loop);
- if (FLAG_turbo_loop_stackcheck || !info()->shared_info()->asm_function()) {
- Node* node = NewNode(javascript()->StackCheck());
- PrepareFrameState(node, stack_check_id);
- }
+ Node* node = NewNode(javascript()->StackCheck());
+ PrepareFrameState(node, stack_check_id);
Visit(stmt->body());
}
@@ -2361,9 +2231,7 @@ LanguageMode AstGraphBuilder::language_mode() const {
return current_scope()->language_mode();
}
-
-VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
- FeedbackVectorSlot slot) const {
+VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(FeedbackSlot slot) const {
return VectorSlotPair(handle(info()->closure()->feedback_vector()), slot);
}
@@ -2372,7 +2240,7 @@ void AstGraphBuilder::VisitRewritableExpression(RewritableExpression* node) {
Visit(node->expression());
}
-float AstGraphBuilder::ComputeCallFrequency(FeedbackVectorSlot slot) const {
+float AstGraphBuilder::ComputeCallFrequency(FeedbackSlot slot) const {
if (slot.IsInvalid()) return 0.0f;
Handle<FeedbackVector> feedback_vector(info()->closure()->feedback_vector(),
isolate());
@@ -2738,6 +2606,8 @@ Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
const VectorSlotPair& feedback) {
+ DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()),
+ language_mode());
const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
Node* node = NewNode(op, object, key, value);
return node;
@@ -2747,16 +2617,28 @@ Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
Node* value,
const VectorSlotPair& feedback) {
+ DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()),
+ language_mode());
const Operator* op =
javascript()->StoreNamed(language_mode(), name, feedback);
Node* node = NewNode(op, object, value);
return node;
}
+Node* AstGraphBuilder::BuildNamedStoreOwn(Node* object, Handle<Name> name,
+ Node* value,
+ const VectorSlotPair& feedback) {
+ DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+ feedback.vector()->GetKind(feedback.slot()));
+ const Operator* op = javascript()->StoreNamedOwn(name, feedback);
+ Node* node = NewNode(op, object, value);
+ return node;
+}
Node* AstGraphBuilder::BuildGlobalLoad(Handle<Name> name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode) {
+ DCHECK_EQ(feedback.vector()->GetTypeofMode(feedback.slot()), typeof_mode);
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
Node* node = NewNode(op);
return node;
@@ -2874,37 +2756,37 @@ Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
BinaryOperationHint hint = BinaryOperationHint::kAny;
switch (op) {
case Token::BIT_OR:
- js_op = javascript()->BitwiseOr(hint);
+ js_op = javascript()->BitwiseOr();
break;
case Token::BIT_AND:
- js_op = javascript()->BitwiseAnd(hint);
+ js_op = javascript()->BitwiseAnd();
break;
case Token::BIT_XOR:
- js_op = javascript()->BitwiseXor(hint);
+ js_op = javascript()->BitwiseXor();
break;
case Token::SHL:
- js_op = javascript()->ShiftLeft(hint);
+ js_op = javascript()->ShiftLeft();
break;
case Token::SAR:
- js_op = javascript()->ShiftRight(hint);
+ js_op = javascript()->ShiftRight();
break;
case Token::SHR:
- js_op = javascript()->ShiftRightLogical(hint);
+ js_op = javascript()->ShiftRightLogical();
break;
case Token::ADD:
js_op = javascript()->Add(hint);
break;
case Token::SUB:
- js_op = javascript()->Subtract(hint);
+ js_op = javascript()->Subtract();
break;
case Token::MUL:
- js_op = javascript()->Multiply(hint);
+ js_op = javascript()->Multiply();
break;
case Token::DIV:
- js_op = javascript()->Divide(hint);
+ js_op = javascript()->Divide();
break;
case Token::MOD:
- js_op = javascript()->Modulus(hint);
+ js_op = javascript()->Modulus();
break;
default:
UNREACHABLE();
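Object-literal data properties in the AST graph builder now lower through the dedicated BuildNamedStoreOwn path and its StoreNamedOwn operator rather than a generic named store, with the choice tied to the feedback slot kind. A hedged sketch of that selection; only kStoreOwnNamed is taken from the diff, the other slot-kind names are illustrative stand-ins:

    #include <cassert>

    enum class FeedbackSlotKind { kStoreNamedSloppy, kStoreNamedStrict, kStoreOwnNamed };
    enum class StoreOp { kStoreNamed, kStoreNamedOwn };

    // Own stores (object-literal definitions) get their own operator;
    // ordinary assignments keep the generic named store.
    StoreOp SelectStoreOp(FeedbackSlotKind kind) {
      return kind == FeedbackSlotKind::kStoreOwnNamed ? StoreOp::kStoreNamedOwn
                                                      : StoreOp::kStoreNamed;
    }

    int main() {
      assert(SelectStoreOp(FeedbackSlotKind::kStoreOwnNamed) == StoreOp::kStoreNamedOwn);
      assert(SelectStoreOp(FeedbackSlotKind::kStoreNamedStrict) == StoreOp::kStoreNamed);
    }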
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 975e08094c..4fd3f35e78 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -246,10 +246,10 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Node** EnsureInputBufferSize(int size);
// Named and keyed loads require a VectorSlotPair for successful lowering.
- VectorSlotPair CreateVectorSlotPair(FeedbackVectorSlot slot) const;
+ VectorSlotPair CreateVectorSlotPair(FeedbackSlot slot) const;
- // Computes the frequency for JSCallFunction and JSCallConstruct nodes.
- float ComputeCallFrequency(FeedbackVectorSlot slot) const;
+ // Computes the frequency for JSCall and JSConstruct nodes.
+ float ComputeCallFrequency(FeedbackSlot slot) const;
// ===========================================================================
// The following build methods all generate graph fragments and return one
@@ -287,6 +287,8 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
const VectorSlotPair& feedback);
Node* BuildNamedStore(Node* receiver, Handle<Name> name, Node* value,
const VectorSlotPair& feedback);
+ Node* BuildNamedStoreOwn(Node* receiver, Handle<Name> name, Node* value,
+ const VectorSlotPair& feedback);
// Builders for global variable loads and stores.
Node* BuildGlobalLoad(Handle<Name> name, const VectorSlotPair& feedback,
@@ -391,11 +393,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
void VisitLiteralCompareTypeof(CompareOperation* expr, Expression* sub_expr,
Handle<String> check);
- // Dispatched from VisitForInStatement.
- void VisitForInAssignment(Expression* expr, Node* value,
- const VectorSlotPair& feedback,
- BailoutId bailout_id);
-
// Dispatched from VisitObjectLiteral.
void VisitObjectLiteralAccessor(Node* home_object,
ObjectLiteralProperty* property);
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 0b7ad19af7..2d9a084e21 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -85,7 +85,7 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
node->opcode() == IrOpcode::kDeoptimizeUnless);
bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
- DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
Node* condition = NodeProperties::GetValueInput(node, 0);
Node* frame_state = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -105,9 +105,8 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
// with the {control} node that already contains the right information.
ReplaceWithValue(node, dead(), effect, control);
} else {
- control =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
- frame_state, effect, control);
+ control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
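Branch elimination now threads both kind and reason through when it folds a conditional deopt: once the condition is statically known, the node either becomes dead (it can never fire) or is replaced with an unconditional Deoptimize built from p.kind() and p.reason(). The firing rule, restated as a small truth table:

    #include <cassert>

    enum class Outcome { kNeverDeopts, kAlwaysDeopts };

    // DeoptimizeUnless fires when its condition is false; DeoptimizeIf
    // fires when it is true. A constant condition folds the node away or
    // into an unconditional Deoptimize.
    Outcome Fold(bool is_deoptimize_unless, bool condition_value) {
      bool fires = is_deoptimize_unless ? !condition_value : condition_value;
      return fires ? Outcome::kAlwaysDeopts : Outcome::kNeverDeopts;
    }

    int main() {
      assert(Fold(/*unless=*/true, /*cond=*/true) == Outcome::kNeverDeopts);
      assert(Fold(/*unless=*/true, /*cond=*/false) == Outcome::kAlwaysDeopts);
      assert(Fold(/*unless=*/false, /*cond=*/true) == Outcome::kAlwaysDeopts);
    }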
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index f0e870739b..6d8afe1744 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -98,9 +98,8 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
const BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
- AccumulatorUse accumulator_use = Bytecodes::GetAccumulatorUse(bytecode);
- if (accumulator_use == AccumulatorUse::kWrite) {
+ if (Bytecodes::WritesAccumulator(bytecode)) {
in_liveness.MarkAccumulatorDead();
}
for (int i = 0; i < num_operands; ++i) {
@@ -138,7 +137,7 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
}
}
- if (accumulator_use == AccumulatorUse::kRead) {
+ if (Bytecodes::ReadsAccumulator(bytecode)) {
in_liveness.MarkAccumulatorLive();
}
for (int i = 0; i < num_operands; ++i) {
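UpdateInLiveness is a standard backward transfer function, and the rewrite above preserves its ordering: writes kill first, reads gen afterwards, i.e. live_in = (live_out - def) | use. For the accumulator bit alone:

    #include <cassert>

    // Backward liveness for the accumulator: live_in = (live_out - def) | use.
    // The kill is applied before the gen so a bytecode that both reads and
    // writes the accumulator keeps the incoming value live.
    bool AccumulatorLiveIn(bool live_out, bool writes_acc, bool reads_acc) {
      bool live = live_out;
      if (writes_acc) live = false;  // the write kills the old value
      if (reads_acc) live = true;    // a read still needs the incoming value
      return live;
    }

    int main() {
      assert(AccumulatorLiveIn(true, true, true));    // read+write: stays live
      assert(!AccumulatorLiveIn(true, true, false));  // pure write: dead above
    }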
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 0acabe4103..aaeee666aa 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -8,11 +8,13 @@
#include "src/ast/scopes.h"
#include "src/compilation-info.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-type-hint-lowering.h"
#include "src/compiler/linkage.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
namespace v8 {
namespace internal {
@@ -82,7 +84,8 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
bool StateValuesRequireUpdate(Node** state_values, Node** values, int count);
void UpdateStateValues(Node** state_values, Node** values, int count);
void UpdateStateValuesWithCache(Node** state_values, Node** values, int count,
- const BitVector* liveness);
+ const BitVector* liveness,
+ int liveness_offset);
int RegisterToValuesIndex(interpreter::Register the_register) const;
@@ -349,7 +352,7 @@ bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
return true;
}
Node::Inputs inputs = (*state_values)->inputs();
- DCHECK_EQ(inputs.count(), count);
+ if (inputs.count() != count) return true;
for (int i = 0; i < count; i++) {
if (inputs[i] != values[i]) {
return true;
@@ -410,28 +413,43 @@ void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
}
void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
- Node** state_values, Node** values, int count, const BitVector* liveness) {
+ Node** state_values, Node** values, int count, const BitVector* liveness,
+ int liveness_offset) {
*state_values = builder_->state_values_cache_.GetNodeForValues(
- values, static_cast<size_t>(count), liveness);
+ values, static_cast<size_t>(count), liveness, liveness_offset);
}
Node* BytecodeGraphBuilder::Environment::Checkpoint(
BailoutId bailout_id, OutputFrameStateCombine combine,
bool owner_has_exception, const BytecodeLivenessState* liveness) {
- UpdateStateValues(&parameters_state_values_, &values()->at(0),
- parameter_count());
+ if (parameter_count() == register_count()) {
+ // Re-use the state-value cache if the number of local registers happens
+ // to match the parameter count.
+ UpdateStateValuesWithCache(&parameters_state_values_, &values()->at(0),
+ parameter_count(), nullptr, 0);
+ } else {
+ UpdateStateValues(&parameters_state_values_, &values()->at(0),
+ parameter_count());
+ }
- // TODO(leszeks): We should pass a view of the liveness bitvector here, with
- // offset and count, rather than passing the entire bitvector and assuming
- // that register liveness starts at offset 0.
UpdateStateValuesWithCache(&registers_state_values_,
&values()->at(register_base()), register_count(),
- liveness ? &liveness->bit_vector() : nullptr);
-
- Node* accumulator_value = liveness == nullptr || liveness->AccumulatorIsLive()
- ? values()->at(accumulator_base())
- : builder()->jsgraph()->OptimizedOutConstant();
- UpdateStateValues(&accumulator_state_values_, &accumulator_value, 1);
+ liveness ? &liveness->bit_vector() : nullptr, 0);
+
+ bool accumulator_is_live = !liveness || liveness->AccumulatorIsLive();
+ if (parameter_count() == 1 && accumulator_is_live &&
+ values()->at(accumulator_base()) == values()->at(0)) {
+ // Re-use the parameter state values if there happens to only be one
+ // parameter and the accumulator is live and holds that parameter's value.
+ accumulator_state_values_ = parameters_state_values_;
+ } else {
+ // Otherwise, use the state values cache to hopefully re-use local register
+ // state values (if there is only one local register), or at the very least
+ // re-use previous accumulator state values.
+ UpdateStateValuesWithCache(
+ &accumulator_state_values_, &values()->at(accumulator_base()), 1,
+ liveness ? &liveness->bit_vector() : nullptr, register_count());
+ }
const Operator* op = common()->FrameState(
bailout_id, combine, builder()->frame_state_function_info());
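Checkpoint now routes all three state-value groups through the shared cache where their shapes allow, and the new liveness_offset lets one per-bytecode bit vector serve two consumers: registers index it from offset 0, the accumulator from offset register_count(). A sketch of how such an offset selects a slice; the cache's actual consumption lives in StateValuesCache::GetNodeForValues:

    #include <cassert>
    #include <vector>

    // One liveness vector covers registers [0, n) plus the accumulator at
    // offset n; liveness_offset picks which slice a lookup addresses.
    bool IsLive(const std::vector<bool>& bits, int liveness_offset, int index) {
      return bits[liveness_offset + index];
    }

    int main() {
      int register_count = 3;
      std::vector<bool> bits = {true, false, true, /*accumulator*/ true};
      assert(IsLive(bits, /*offset=*/0, /*register*/ 1) == false);
      assert(IsLive(bits, register_count, /*accumulator*/ 0) == true);
    }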
@@ -469,8 +487,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
current_exception_handler_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
+ needs_eager_checkpoint_(true),
exit_controls_(local_zone),
- is_liveness_analysis_enabled_(FLAG_analyze_environment_liveness),
state_values_cache_(jsgraph),
source_positions_(source_positions),
start_position_(shared_info->start_position(), inlining_id) {}
@@ -521,7 +539,7 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
- FeedbackVectorSlot slot;
+ FeedbackSlot slot;
if (slot_id >= FeedbackVector::kReservedIndexCount) {
slot = feedback_vector()->ToSlot(slot_id);
}
@@ -555,9 +573,10 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
}
void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
- if (environment()->GetEffectDependency()->opcode() != IrOpcode::kCheckpoint) {
+ if (needs_eager_checkpoint()) {
// Create an explicit checkpoint node for before the operation. This only
// needs to happen if we aren't effect-dominated by a {Checkpoint} already.
+ mark_as_needing_eager_checkpoint(false);
Node* node = NewNode(common()->Checkpoint());
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
DCHECK_EQ(IrOpcode::kDead,
@@ -571,7 +590,21 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
Node* frame_state_before = environment()->Checkpoint(
bailout_id, OutputFrameStateCombine::Ignore(), false, liveness_before);
NodeProperties::ReplaceFrameStateInput(node, frame_state_before);
+#ifdef DEBUG
+ } else {
+ // In case we skipped checkpoint creation above, we must be able to find an
+ // existing checkpoint that effect-dominates the nodes about to be created.
+ // Starting a search from the current effect-dependency has to succeed.
+ Node* effect = environment()->GetEffectDependency();
+ while (effect->opcode() != IrOpcode::kCheckpoint) {
+ DCHECK(effect->op()->HasProperty(Operator::kNoWrite));
+ DCHECK_EQ(1, effect->op()->EffectInputCount());
+ effect = NodeProperties::GetEffectInput(effect);
+ }
}
+#else
+ }
+#endif // DEBUG
}
void BytecodeGraphBuilder::PrepareFrameState(Node* node,
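PrepareEagerCheckpoint now consults a cheap needs_eager_checkpoint_ flag instead of inspecting the effect chain, and in debug builds it verifies the flag's invariant by walking effect inputs until it finds the dominating Checkpoint, requiring every intervening node to be write-free. The walk, restated over a stand-in node type:

    #include <cassert>

    // Stand-in for compiler::Node; V8 checks Operator::kNoWrite instead.
    struct Node { bool is_checkpoint; bool no_write; Node* effect_input; };

    // Skipping checkpoint creation is sound only if every effect node
    // between here and the nearest Checkpoint performs no writes.
    bool DominatedByCheckpoint(Node* effect) {
      while (!effect->is_checkpoint) {
        if (!effect->no_write) return false;
        effect = effect->effect_input;
      }
      return true;
    }

    int main() {
      Node checkpoint{true, true, nullptr};
      Node pure_load{false, true, &checkpoint};  // write-free node in between
      assert(DominatedByCheckpoint(&pure_load));
    }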
@@ -706,8 +739,7 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name,
uint32_t feedback_slot_index,
TypeofMode typeof_mode) {
VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
- DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
- feedback_vector()->GetKind(feedback.slot()));
+ DCHECK(IsLoadGlobalICKind(feedback_vector()->GetKind(feedback.slot())));
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
return NewNode(op);
}
@@ -771,9 +803,6 @@ void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
}
void BytecodeGraphBuilder::VisitLdaContextSlot() {
- // TODO(mythria): immutable flag is also set to false. This information is not
- // available in bytecode array. update this code when the implementation
- // changes.
const Operator* op = javascript()->LoadContext(
bytecode_iterator().GetUnsignedImmediateOperand(2),
bytecode_iterator().GetIndexOperand(1), false);
@@ -784,16 +813,31 @@ void BytecodeGraphBuilder::VisitLdaContextSlot() {
environment()->BindAccumulator(node);
}
+void BytecodeGraphBuilder::VisitLdaImmutableContextSlot() {
+ const Operator* op = javascript()->LoadContext(
+ bytecode_iterator().GetUnsignedImmediateOperand(2),
+ bytecode_iterator().GetIndexOperand(1), true);
+ Node* node = NewNode(op);
+ Node* context =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ NodeProperties::ReplaceContextInput(node, context);
+ environment()->BindAccumulator(node);
+}
+
void BytecodeGraphBuilder::VisitLdaCurrentContextSlot() {
- // TODO(mythria): immutable flag is also set to false. This information is not
- // available in bytecode array. update this code when the implementation
- // changes.
const Operator* op = javascript()->LoadContext(
0, bytecode_iterator().GetIndexOperand(0), false);
Node* node = NewNode(op);
environment()->BindAccumulator(node);
}
+void BytecodeGraphBuilder::VisitLdaImmutableCurrentContextSlot() {
+ const Operator* op = javascript()->LoadContext(
+ 0, bytecode_iterator().GetIndexOperand(0), true);
+ Node* node = NewNode(op);
+ environment()->BindAccumulator(node);
+}
+
void BytecodeGraphBuilder::VisitStaContextSlot() {
const Operator* op = javascript()->StoreContext(
bytecode_iterator().GetUnsignedImmediateOperand(2),
@@ -844,8 +888,8 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false));
Node* check_no_extension =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
- extension_slot, jsgraph()->TheHoleConstant());
+ NewNode(simplified()->ReferenceEqual(), extension_slot,
+ jsgraph()->TheHoleConstant());
NewBranch(check_no_extension);
Environment* true_environment = environment()->Copy();
@@ -912,6 +956,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
fast_environment->Merge(environment());
set_environment(fast_environment);
+ mark_as_needing_eager_checkpoint(true);
}
}
@@ -961,6 +1006,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
fast_environment->Merge(environment());
set_environment(fast_environment);
+ mark_as_needing_eager_checkpoint(true);
}
}
@@ -1019,7 +1065,8 @@ void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
+void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode,
+ StoreMode store_mode) {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* object =
@@ -1029,17 +1076,31 @@ void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
- const Operator* op = javascript()->StoreNamed(language_mode, name, feedback);
+ const Operator* op;
+ if (store_mode == StoreMode::kOwn) {
+ DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+ feedback.vector()->GetKind(feedback.slot()));
+ op = javascript()->StoreNamedOwn(name, feedback);
+ } else {
+ DCHECK(store_mode == StoreMode::kNormal);
+ DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()),
+ language_mode);
+ op = javascript()->StoreNamed(language_mode, name, feedback);
+ }
Node* node = NewNode(op, object, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitStaNamedPropertySloppy() {
- BuildNamedStore(LanguageMode::SLOPPY);
+ BuildNamedStore(LanguageMode::SLOPPY, StoreMode::kNormal);
}
void BytecodeGraphBuilder::VisitStaNamedPropertyStrict() {
- BuildNamedStore(LanguageMode::STRICT);
+ BuildNamedStore(LanguageMode::STRICT, StoreMode::kNormal);
+}
+
+void BytecodeGraphBuilder::VisitStaNamedOwnProperty() {
+ BuildNamedStore(LanguageMode::STRICT, StoreMode::kOwn);
}
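
The StoreMode split above reduces to operator selection plus a feedback-kind assertion. A compact sketch with stand-in names (SelectNamedStoreOp is hypothetical, not a V8 factory):

    #include <cassert>
    #include <iostream>

    enum class LanguageMode { kSloppy, kStrict };
    enum class StoreMode { kNormal, kOwn };  // kOwn skips the prototype chain

    // Stand-in for the javascript()->StoreNamed/StoreNamedOwn choice above.
    const char* SelectNamedStoreOp(LanguageMode mode, StoreMode store_mode) {
      if (store_mode == StoreMode::kOwn) {
        // Own stores are always strict, which is why VisitStaNamedOwnProperty
        // passes LanguageMode::STRICT unconditionally.
        assert(mode == LanguageMode::kStrict);
        return "JSStoreNamedOwn";
      }
      return mode == LanguageMode::kStrict ? "JSStoreNamed (strict)"
                                           : "JSStoreNamed (sloppy)";
    }

    int main() {
      std::cout << SelectNamedStoreOp(LanguageMode::kStrict, StoreMode::kOwn)
                << "\n";  // JSStoreNamedOwn
    }
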
void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
@@ -1052,6 +1113,7 @@ void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
+ DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()), language_mode);
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = NewNode(op, object, key, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
@@ -1068,8 +1130,8 @@ void BytecodeGraphBuilder::VisitStaKeyedPropertyStrict() {
void BytecodeGraphBuilder::VisitLdaModuleVariable() {
int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
- Node* module = NewNode(
- javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false));
+ Node* module =
+ NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, true));
Node* value = NewNode(javascript()->LoadModule(cell_index), module);
environment()->BindAccumulator(value);
}
@@ -1077,8 +1139,8 @@ void BytecodeGraphBuilder::VisitLdaModuleVariable() {
void BytecodeGraphBuilder::VisitStaModuleVariable() {
int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
- Node* module = NewNode(
- javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false));
+ Node* module =
+ NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, true));
Node* value = environment()->LookupAccumulator();
NewNode(javascript()->StoreModule(cell_index), module, value);
}
@@ -1213,16 +1275,16 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
}
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
- PrepareEagerCheckpoint();
- Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<BoilerplateDescription> constant_properties =
+ Handle<BoilerplateDescription>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
int literal_flags =
interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
// TODO(mstarzinger): Thread through number of properties. The number below is
// only an estimate and does not match {ObjectLiteral::properties_count}.
- int number_of_properties = constant_properties->length() / 2;
+ int number_of_properties = constant_properties->size();
Node* literal = NewNode(
javascript()->CreateLiteralObject(constant_properties, literal_flags,
literal_index, number_of_properties),
@@ -1263,8 +1325,8 @@ void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode,
VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
float const frequency = ComputeCallFrequency(slot_id);
- const Operator* call = javascript()->CallFunction(
- arg_count + 1, frequency, feedback, receiver_hint, tail_call_mode);
+ const Operator* call = javascript()->Call(arg_count + 1, frequency, feedback,
+ receiver_hint, tail_call_mode);
Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
@@ -1273,6 +1335,19 @@ void BytecodeGraphBuilder::VisitCall() {
BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kAny);
}
+void BytecodeGraphBuilder::VisitCallWithSpread() {
+ PrepareEagerCheckpoint();
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ const Operator* call =
+ javascript()->CallWithSpread(static_cast<int>(arg_count + 1));
+
+ Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitCallProperty() {
BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -1293,7 +1368,7 @@ void BytecodeGraphBuilder::VisitCallJSRuntime() {
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
// Create node to perform the JS runtime call.
- const Operator* call = javascript()->CallFunction(arg_count + 1);
+ const Operator* call = javascript()->Call(arg_count + 1);
Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
@@ -1338,14 +1413,34 @@ void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitNewWithSpread() {
+Node* BytecodeGraphBuilder::ProcessConstructWithSpreadArguments(
+ const Operator* op, Node* callee, Node* new_target,
+ interpreter::Register first_arg, size_t arity) {
+ Node** all = local_zone()->NewArray<Node*>(arity);
+ all[0] = callee;
+ int first_arg_index = first_arg.index();
+ for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
+ all[i] = environment()->LookupRegister(
+ interpreter::Register(first_arg_index + i - 1));
+ }
+ all[arity - 1] = new_target;
+ Node* value = MakeNode(op, static_cast<int>(arity), all, false);
+ return value;
+}
+
+void BytecodeGraphBuilder::VisitConstructWithSpread() {
PrepareEagerCheckpoint();
- interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(0);
- size_t arg_count = bytecode_iterator().GetRegisterCountOperand(1);
+ interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
+ interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+ size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+
+ Node* new_target = environment()->LookupAccumulator();
+ Node* callee = environment()->LookupRegister(callee_reg);
const Operator* op =
- javascript()->CallConstructWithSpread(static_cast<int>(arg_count));
- Node* value = ProcessCallRuntimeArguments(op, first_arg, arg_count);
+ javascript()->ConstructWithSpread(static_cast<int>(arg_count) + 2);
+ Node* value = ProcessConstructWithSpreadArguments(op, callee, new_target,
+ first_arg, arg_count + 2);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
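
ProcessConstructWithSpreadArguments lays the operator inputs out as [callee, arg0 ... argN-1, new_target], which is why the operator is built with arity arg_count + 2. The packing in isolation, with strings standing in for nodes:

    #include <cassert>
    #include <string>
    #include <vector>

    // all[0] = callee, all[1..arity-2] = spread arguments read from
    // consecutive registers, all[arity-1] = new_target.
    std::vector<std::string> PackConstructInputs(
        const std::string& callee, const std::vector<std::string>& args,
        const std::string& new_target) {
      std::vector<std::string> all;
      all.reserve(args.size() + 2);
      all.push_back(callee);
      all.insert(all.end(), args.begin(), args.end());
      all.push_back(new_target);
      return all;
    }

    int main() {
      auto all = PackConstructInputs("callee", {"a0", "a1"}, "new_target");
      assert(all.size() == 4);  // arity == arg_count + 2
      assert(all.front() == "callee" && all.back() == "new_target");
    }
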
@@ -1362,7 +1457,7 @@ void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
-Node* BytecodeGraphBuilder::ProcessCallNewArguments(
+Node* BytecodeGraphBuilder::ProcessConstructArguments(
const Operator* call_new_op, Node* callee, Node* new_target,
interpreter::Register first_arg, size_t arity) {
Node** all = local_zone()->NewArray<Node*>(arity);
@@ -1377,7 +1472,7 @@ Node* BytecodeGraphBuilder::ProcessCallNewArguments(
return value;
}
-void BytecodeGraphBuilder::VisitNew() {
+void BytecodeGraphBuilder::VisitConstruct() {
PrepareEagerCheckpoint();
interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
@@ -1392,24 +1487,18 @@ void BytecodeGraphBuilder::VisitNew() {
Node* callee = environment()->LookupRegister(callee_reg);
float const frequency = ComputeCallFrequency(slot_id);
- const Operator* call = javascript()->CallConstruct(
+ const Operator* call = javascript()->Construct(
static_cast<int>(arg_count) + 2, frequency, feedback);
- Node* value = ProcessCallNewArguments(call, callee, new_target, first_arg,
- arg_count + 2);
+ Node* value = ProcessConstructArguments(call, callee, new_target, first_arg,
+ arg_count + 2);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildThrow() {
- PrepareEagerCheckpoint();
+void BytecodeGraphBuilder::VisitThrow() {
+ BuildLoopExitsForFunctionExit();
Node* value = environment()->LookupAccumulator();
Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
environment()->BindAccumulator(call, Environment::kAttachFrameState);
-}
-
-void BytecodeGraphBuilder::VisitThrow() {
- BuildLoopExitsForFunctionExit();
- BuildThrow();
- Node* call = environment()->LookupAccumulator();
Node* control = NewNode(common()->Throw(), call);
MergeControlToLeaveFunction(control);
}
@@ -1422,12 +1511,39 @@ void BytecodeGraphBuilder::VisitReThrow() {
MergeControlToLeaveFunction(control);
}
-void BytecodeGraphBuilder::BuildBinaryOp(const Operator* js_op) {
+Node* BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op,
+ Node* left, Node* right,
+ FeedbackSlot slot) {
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ JSTypeHintLowering type_hint_lowering(jsgraph(), feedback_vector());
+ Reduction early_reduction = type_hint_lowering.ReduceBinaryOperation(
+ op, left, right, effect, control, slot);
+ if (early_reduction.Changed()) {
+ Node* node = early_reduction.replacement();
+ if (node->op()->EffectOutputCount() > 0) {
+ environment()->UpdateEffectDependency(node);
+ }
+ return node;
+ }
+ return nullptr;
+}
+
+void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
PrepareEagerCheckpoint();
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
- Node* node = NewNode(js_op, left, right);
+
+ Node* node = nullptr;
+ FeedbackSlot slot = feedback_vector()->ToSlot(
+ bytecode_iterator().GetIndexOperand(kBinaryOperationHintIndex));
+ if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+ node = simplified;
+ } else {
+ node = NewNode(op, left, right);
+ }
+
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
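
TryBuildSimplifiedBinaryOp gives BuildBinaryOp a try-lower-else-generic shape: consult the feedback slot first and fall back to the generic JS operator only when no simplified lowering applies. A sketch of that control flow, with a hypothetical Hint enum in place of the real feedback plumbing:

    #include <iostream>
    #include <optional>
    #include <string>

    enum class Hint { kSignedSmall, kNumber, kAny };  // stand-in for feedback

    std::optional<std::string> TrySimplifiedAdd(Hint hint) {
      switch (hint) {
        case Hint::kSignedSmall:
          return "SpeculativeNumberAdd[SignedSmall]";  // deopts on non-Smis
        case Hint::kNumber:
          return "SpeculativeNumberAdd[Number]";
        default:
          return std::nullopt;  // no early lowering available
      }
    }

    std::string BuildAdd(Hint hint) {
      if (auto simplified = TrySimplifiedAdd(hint)) return *simplified;
      return "JSAdd";  // generic operator: handles strings, objects, etc.
    }

    int main() {
      std::cout << BuildAdd(Hint::kSignedSmall) << "\n";
      std::cout << BuildAdd(Hint::kAny) << "\n";
    }
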
@@ -1435,10 +1551,9 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* js_op) {
// feedback.
BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
int operand_index) {
- FeedbackVectorSlot slot = feedback_vector()->ToSlot(
+ FeedbackSlot slot = feedback_vector()->ToSlot(
bytecode_iterator().GetIndexOperand(operand_index));
- DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
- feedback_vector()->GetKind(slot));
+ DCHECK_EQ(FeedbackSlotKind::kBinaryOp, feedback_vector()->GetKind(slot));
BinaryOpICNexus nexus(feedback_vector(), slot);
return nexus.GetBinaryOperationFeedback();
}
@@ -1450,10 +1565,9 @@ CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
if (slot_index == 0) {
return CompareOperationHint::kAny;
}
- FeedbackVectorSlot slot =
+ FeedbackSlot slot =
feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
- DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
- feedback_vector()->GetKind(slot));
+ DCHECK_EQ(FeedbackSlotKind::kCompareOp, feedback_vector()->GetKind(slot));
CompareICNexus nexus(feedback_vector(), slot);
return nexus.GetCompareOperationFeedback();
}
@@ -1469,61 +1583,58 @@ void BytecodeGraphBuilder::VisitAdd() {
}
void BytecodeGraphBuilder::VisitSub() {
- BuildBinaryOp(javascript()->Subtract(
- GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ BuildBinaryOp(javascript()->Subtract());
}
void BytecodeGraphBuilder::VisitMul() {
- BuildBinaryOp(javascript()->Multiply(
- GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ BuildBinaryOp(javascript()->Multiply());
}
-void BytecodeGraphBuilder::VisitDiv() {
- BuildBinaryOp(
- javascript()->Divide(GetBinaryOperationHint(kBinaryOperationHintIndex)));
-}
+void BytecodeGraphBuilder::VisitDiv() { BuildBinaryOp(javascript()->Divide()); }
void BytecodeGraphBuilder::VisitMod() {
- BuildBinaryOp(
- javascript()->Modulus(GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ BuildBinaryOp(javascript()->Modulus());
}
void BytecodeGraphBuilder::VisitBitwiseOr() {
- BuildBinaryOp(javascript()->BitwiseOr(
- GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ BuildBinaryOp(javascript()->BitwiseOr());
}
void BytecodeGraphBuilder::VisitBitwiseXor() {
- BuildBinaryOp(javascript()->BitwiseXor(
- GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ BuildBinaryOp(javascript()->BitwiseXor());
}
void BytecodeGraphBuilder::VisitBitwiseAnd() {
- BuildBinaryOp(javascript()->BitwiseAnd(
- GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ BuildBinaryOp(javascript()->BitwiseAnd());
}
void BytecodeGraphBuilder::VisitShiftLeft() {
- BuildBinaryOp(javascript()->ShiftLeft(
- GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ BuildBinaryOp(javascript()->ShiftLeft());
}
void BytecodeGraphBuilder::VisitShiftRight() {
- BuildBinaryOp(javascript()->ShiftRight(
- GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ BuildBinaryOp(javascript()->ShiftRight());
}
void BytecodeGraphBuilder::VisitShiftRightLogical() {
- BuildBinaryOp(javascript()->ShiftRightLogical(
- GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ BuildBinaryOp(javascript()->ShiftRightLogical());
}
-void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* js_op) {
+void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
PrepareEagerCheckpoint();
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
- Node* node = NewNode(js_op, left, right);
+
+ Node* node = nullptr;
+ FeedbackSlot slot = feedback_vector()->ToSlot(
+ bytecode_iterator().GetIndexOperand(kBinaryOperationSmiHintIndex));
+ if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+ node = simplified;
+ } else {
+ node = NewNode(op, left, right);
+ }
+
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -1533,62 +1644,73 @@ void BytecodeGraphBuilder::VisitAddSmi() {
}
void BytecodeGraphBuilder::VisitSubSmi() {
- BuildBinaryOpWithImmediate(javascript()->Subtract(
- GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+ BuildBinaryOpWithImmediate(javascript()->Subtract());
}
void BytecodeGraphBuilder::VisitBitwiseOrSmi() {
- BuildBinaryOpWithImmediate(javascript()->BitwiseOr(
- GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+ BuildBinaryOpWithImmediate(javascript()->BitwiseOr());
}
void BytecodeGraphBuilder::VisitBitwiseAndSmi() {
- BuildBinaryOpWithImmediate(javascript()->BitwiseAnd(
- GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+ BuildBinaryOpWithImmediate(javascript()->BitwiseAnd());
}
void BytecodeGraphBuilder::VisitShiftLeftSmi() {
- BuildBinaryOpWithImmediate(javascript()->ShiftLeft(
- GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+ BuildBinaryOpWithImmediate(javascript()->ShiftLeft());
}
void BytecodeGraphBuilder::VisitShiftRightSmi() {
- BuildBinaryOpWithImmediate(javascript()->ShiftRight(
- GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+ BuildBinaryOpWithImmediate(javascript()->ShiftRight());
}
void BytecodeGraphBuilder::VisitInc() {
PrepareEagerCheckpoint();
// Note: Use subtract -1 here instead of add 1 to ensure we always convert to
// a number, not a string.
- const Operator* js_op =
- javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
- Node* node = NewNode(js_op, environment()->LookupAccumulator(),
- jsgraph()->Constant(-1));
+ Node* left = environment()->LookupAccumulator();
+ Node* right = jsgraph()->Constant(-1);
+ const Operator* op = javascript()->Subtract();
+
+ Node* node = nullptr;
+ FeedbackSlot slot = feedback_vector()->ToSlot(
+ bytecode_iterator().GetIndexOperand(kCountOperationHintIndex));
+ if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+ node = simplified;
+ } else {
+ node = NewNode(op, left, right);
+ }
+
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitDec() {
PrepareEagerCheckpoint();
- const Operator* js_op =
- javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
- Node* node = NewNode(js_op, environment()->LookupAccumulator(),
- jsgraph()->OneConstant());
+ Node* left = environment()->LookupAccumulator();
+ Node* right = jsgraph()->OneConstant();
+ const Operator* op = javascript()->Subtract();
+
+ Node* node = nullptr;
+ FeedbackSlot slot = feedback_vector()->ToSlot(
+ bytecode_iterator().GetIndexOperand(kCountOperationHintIndex));
+ if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+ node = simplified;
+ } else {
+ node = NewNode(op, left, right);
+ }
+
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitLogicalNot() {
Node* value = environment()->LookupAccumulator();
- Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
- jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+ Node* node = NewNode(simplified()->BooleanNot(), value);
environment()->BindAccumulator(node);
}
void BytecodeGraphBuilder::VisitToBooleanLogicalNot() {
Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
environment()->LookupAccumulator());
- Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
- jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+ Node* node = NewNode(simplified()->BooleanNot(), value);
environment()->BindAccumulator(node);
}
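
Both LogicalNot visitors above rely on the identity Select(v, false, true) == !v for boolean v, so one simplified BooleanNot node replaces the three-input Select. The equivalence, spelled out:

    #include <cassert>

    bool SelectFalseTrue(bool v) { return v ? false : true; }  // old lowering
    bool BooleanNot(bool v) { return !v; }                     // new lowering

    int main() {
      for (bool v : {false, true}) assert(SelectFalseTrue(v) == BooleanNot(v));
    }
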
@@ -1678,16 +1800,16 @@ void BytecodeGraphBuilder::VisitTestUndetectable() {
void BytecodeGraphBuilder::VisitTestNull() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* result = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
- object, jsgraph()->NullConstant());
+ Node* result = NewNode(simplified()->ReferenceEqual(), object,
+ jsgraph()->NullConstant());
environment()->BindAccumulator(result);
}
void BytecodeGraphBuilder::VisitTestUndefined() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* result = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
- object, jsgraph()->UndefinedConstant());
+ Node* result = NewNode(simplified()->ReferenceEqual(), object,
+ jsgraph()->UndefinedConstant());
environment()->BindAccumulator(result);
}
@@ -1789,9 +1911,8 @@ void BytecodeGraphBuilder::VisitReturn() {
void BytecodeGraphBuilder::VisitDebugger() {
PrepareEagerCheckpoint();
- Node* call =
- NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
- environment()->BindAccumulator(call, Environment::kAttachFrameState);
+ Node* call = NewNode(javascript()->Debugger());
+ environment()->RecordAfterState(call, Environment::kAttachFrameState);
}
// We cannot create a graph from the debugger copy of the bytecode array.
@@ -1847,8 +1968,9 @@ void BytecodeGraphBuilder::VisitForInStep() {
PrepareEagerCheckpoint();
Node* index =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall), index,
- jsgraph()->OneConstant());
+ index = NewNode(
+ simplified()->SpeculativeNumberAdd(NumberOperationHint::kSignedSmall),
+ index, jsgraph()->OneConstant());
environment()->BindAccumulator(index, Environment::kAttachFrameState);
}
@@ -1917,6 +2039,7 @@ void BytecodeGraphBuilder::VisitNop() {}
void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
auto it = merge_environments_.find(current_offset);
if (it != merge_environments_.end()) {
+ mark_as_needing_eager_checkpoint(true);
if (environment() != nullptr) {
it->second->Merge(environment());
}
@@ -1926,6 +2049,7 @@ void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
if (bytecode_analysis()->IsLoopHeader(current_offset)) {
+ mark_as_needing_eager_checkpoint(true);
const LoopInfo& loop_info =
bytecode_analysis()->GetLoopInfoFor(current_offset);
@@ -2036,17 +2160,30 @@ void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
Node* accumulator = environment()->LookupAccumulator();
Node* condition =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
- accumulator, comperand);
+ NewNode(simplified()->ReferenceEqual(), accumulator, comperand);
BuildJumpIf(condition);
}
void BytecodeGraphBuilder::BuildJumpIfFalse() {
- BuildJumpIfNot(environment()->LookupAccumulator());
+ NewBranch(environment()->LookupAccumulator());
+ Environment* if_true_environment = environment()->Copy();
+ environment()->BindAccumulator(jsgraph()->FalseConstant());
+ NewIfFalse();
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ if_true_environment->BindAccumulator(jsgraph()->TrueConstant());
+ set_environment(if_true_environment);
+ NewIfTrue();
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
- BuildJumpIf(environment()->LookupAccumulator());
+ NewBranch(environment()->LookupAccumulator());
+ Environment* if_false_environment = environment()->Copy();
+ environment()->BindAccumulator(jsgraph()->TrueConstant());
+ NewIfTrue();
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ if_false_environment->BindAccumulator(jsgraph()->FalseConstant());
+ set_environment(if_false_environment);
+ NewIfFalse();
}
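
BuildJumpIfFalse and BuildJumpIfTrue now branch on the raw accumulator value and then rebind the accumulator in each successor environment to the constant that edge has just proven. This is sound because these bytecodes test a boolean directly; the ToBoolean jump variants, handled separately below, branch on a coercion of the value and therefore cannot refine it this way. The refinement idea, with a hypothetical Env type:

    #include <cassert>
    #include <optional>

    // Hypothetical environment: tracks a known-constant accumulator, if any.
    struct Env {
      std::optional<bool> accumulator_constant;
    };

    // After branching on a boolean accumulator, each successor may assume
    // the value its edge proved.
    void SplitOnBooleanAccumulator(Env& if_true_env, Env& if_false_env) {
      if_true_env.accumulator_constant = true;
      if_false_env.accumulator_constant = false;
    }

    int main() {
      Env t, f;
      SplitOnBooleanAccumulator(t, f);
      assert(*t.accumulator_constant && !*f.accumulator_constant);
    }
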
void BytecodeGraphBuilder::BuildJumpIfToBooleanTrue() {
@@ -2065,9 +2202,8 @@ void BytecodeGraphBuilder::BuildJumpIfToBooleanFalse() {
void BytecodeGraphBuilder::BuildJumpIfNotHole() {
Node* accumulator = environment()->LookupAccumulator();
- Node* condition =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
- accumulator, jsgraph()->TheHoleConstant());
+ Node* condition = NewNode(simplified()->ReferenceEqual(), accumulator,
+ jsgraph()->TheHoleConstant());
BuildJumpIfNot(condition);
}
@@ -2182,6 +2318,10 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
Node* on_success = graph()->NewNode(if_success, result);
environment()->UpdateControlDependency(on_success);
}
+ // Ensure checkpoints are created after operations with side-effects.
+ if (has_effect && !result->op()->HasProperty(Operator::kNoWrite)) {
+ mark_as_needing_eager_checkpoint(true);
+ }
}
return result;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index bbc2424720..41fcf6851f 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -111,9 +111,14 @@ class BytecodeGraphBuilder {
Node* ProcessCallArguments(const Operator* call_op, Node* callee,
interpreter::Register receiver, size_t arity);
- Node* ProcessCallNewArguments(const Operator* call_new_op, Node* callee,
- Node* new_target,
- interpreter::Register first_arg, size_t arity);
+ Node* ProcessConstructArguments(const Operator* call_new_op, Node* callee,
+ Node* new_target,
+ interpreter::Register first_arg,
+ size_t arity);
+ Node* ProcessConstructWithSpreadArguments(const Operator* op, Node* callee,
+ Node* new_target,
+ interpreter::Register first_arg,
+ size_t arity);
Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
interpreter::Register first_arg,
size_t arity);
@@ -132,7 +137,14 @@ class BytecodeGraphBuilder {
Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
TypeofMode typeof_mode);
void BuildStoreGlobal(LanguageMode language_mode);
- void BuildNamedStore(LanguageMode language_mode);
+
+ enum class StoreMode {
+ // Check the prototype chain before storing.
+ kNormal,
+ // Store value to the receiver without checking the prototype chain.
+ kOwn,
+ };
+ void BuildNamedStore(LanguageMode language_mode, StoreMode store_mode);
void BuildKeyedStore(LanguageMode language_mode);
void BuildLdaLookupSlot(TypeofMode typeof_mode);
void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
@@ -140,7 +152,6 @@ class BytecodeGraphBuilder {
void BuildStaLookupSlot(LanguageMode language_mode);
void BuildCall(TailCallMode tail_call_mode,
ConvertReceiverMode receiver_hint);
- void BuildThrow();
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
void BuildCompareOp(const Operator* op);
@@ -150,6 +161,13 @@ class BytecodeGraphBuilder {
void BuildForInNext();
void BuildInvokeIntrinsic();
+ // Optional early lowering to the simplified operator level. Returns the node
+ // representing the lowered operation or {nullptr} if no lowering is
+ // available.
+ // Note that the result has already been wired into the environment just like
+ // any other invocation of {NewNode} would do.
+ Node* TryBuildSimplifiedBinaryOp(const Operator* op, Node* left, Node* right,
+ FeedbackSlot slot);
+
// Check the context chain for extensions, for lookup fast paths.
Environment* CheckContextExtensions(uint32_t depth);
@@ -257,8 +275,9 @@ class BytecodeGraphBuilder {
bytecode_analysis_ = bytecode_analysis;
}
- bool IsLivenessAnalysisEnabled() const {
- return this->is_liveness_analysis_enabled_;
+ bool needs_eager_checkpoint() const { return needs_eager_checkpoint_; }
+ void mark_as_needing_eager_checkpoint(bool value) {
+ needs_eager_checkpoint_ = value;
}
#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
@@ -291,6 +310,11 @@ class BytecodeGraphBuilder {
int input_buffer_size_;
Node** input_buffer_;
+ // Optimization to only create checkpoints when the current position in the
+ // control-flow is not already effect-dominated by another checkpoint. All
+ // operations that do not have observable side-effects can be re-evaluated.
+ bool needs_eager_checkpoint_;
+
// Nodes representing values in the activation record.
SetOncePointer<Node> function_context_;
SetOncePointer<Node> function_closure_;
@@ -299,8 +323,6 @@ class BytecodeGraphBuilder {
// Control nodes that exit the function body.
ZoneVector<Node*> exit_controls_;
- bool const is_liveness_analysis_enabled_;
-
StateValuesCache state_values_cache_;
// The source position table, to be populated.
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 991ae3699d..1ace7dae5e 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -19,6 +19,7 @@
#include "src/interpreter/bytecodes.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/utils.h"
#include "src/zone/zone.h"
@@ -187,6 +188,10 @@ Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
return raw_assembler()->HeapConstant(object);
}
+Node* CodeAssembler::CStringConstant(const char* str) {
+ return HeapConstant(factory()->NewStringFromAsciiChecked(str, TENURED));
+}
+
Node* CodeAssembler::BooleanConstant(bool value) {
return raw_assembler()->BooleanConstant(value);
}
@@ -249,16 +254,36 @@ Node* CodeAssembler::Parameter(int value) {
return raw_assembler()->Parameter(value);
}
+Node* CodeAssembler::GetJSContextParameter() {
+ CallDescriptor* desc = raw_assembler()->call_descriptor();
+ DCHECK(desc->IsJSFunctionCall());
+ return Parameter(Linkage::GetJSCallContextParamIndex(
+ static_cast<int>(desc->JSParameterCount())));
+}
+
void CodeAssembler::Return(Node* value) {
return raw_assembler()->Return(value);
}
+void CodeAssembler::Return(Node* value1, Node* value2) {
+ return raw_assembler()->Return(value1, value2);
+}
+
+void CodeAssembler::Return(Node* value1, Node* value2, Node* value3) {
+ return raw_assembler()->Return(value1, value2, value3);
+}
+
void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
return raw_assembler()->PopAndReturn(pop, value);
}
void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
+void CodeAssembler::Unreachable() {
+ DebugBreak();
+ raw_assembler()->Unreachable();
+}
+
void CodeAssembler::Comment(const char* format, ...) {
if (!FLAG_code_comments) return;
char buffer[4 * KB];
@@ -602,6 +627,12 @@ template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
Node*, Node*);
+Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
+ int input_count, Node* const* inputs) {
+ CallDescriptor* desc = Linkage::GetSimplifiedCDescriptor(zone(), signature);
+ return raw_assembler()->CallN(desc, input_count, inputs);
+}
+
Node* CodeAssembler::CallCFunction2(MachineType return_type,
MachineType arg0_type,
MachineType arg1_type, Node* function,
@@ -630,7 +661,7 @@ void CodeAssembler::GotoIf(Node* condition, Label* true_label) {
Bind(&false_label);
}
-void CodeAssembler::GotoUnless(Node* condition, Label* false_label) {
+void CodeAssembler::GotoIfNot(Node* condition, Label* false_label) {
Label true_label(this);
Branch(condition, &true_label, false_label);
Bind(&true_label);
@@ -687,6 +718,13 @@ CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
state_->variables_.insert(impl_);
}
+CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
+ MachineRepresentation rep,
+ Node* initial_value)
+ : CodeAssemblerVariable(assembler, rep) {
+ Bind(initial_value);
+}
+
CodeAssemblerVariable::~CodeAssemblerVariable() {
state_->variables_.erase(impl_);
}
@@ -719,6 +757,8 @@ CodeAssemblerLabel::CodeAssemblerLabel(CodeAssembler* assembler,
}
}
+CodeAssemblerLabel::~CodeAssemblerLabel() { label_->~RawMachineLabel(); }
+
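
The explicit destructor added here suggests the RawMachineLabel is placement-allocated in zone memory: a zone reclaims its storage in bulk without running destructors, so any non-trivial cleanup must be invoked by hand. The pattern in isolation (RawLabel and LabelWrapper are illustrative names):

    #include <new>

    struct RawLabel {
      ~RawLabel() { /* non-trivial cleanup */ }
    };

    struct LabelWrapper {
      // The raw label is constructed via placement-new into zone storage.
      explicit LabelWrapper(void* zone_storage)
          : label_(new (zone_storage) RawLabel()) {}
      // The zone reclaims the bytes but never calls destructors, so the
      // wrapper runs it explicitly.
      ~LabelWrapper() { label_->~RawLabel(); }
      RawLabel* label_;
    };

    int main() {
      alignas(RawLabel) unsigned char storage[sizeof(RawLabel)];
      LabelWrapper w(storage);  // destructor runs when w leaves scope
    }
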
void CodeAssemblerLabel::MergeVariables() {
++merge_count_;
for (auto var : state_->variables_) {
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 25b1fab4a7..8808a82f8e 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -216,6 +216,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* SmiConstant(Smi* value);
Node* SmiConstant(int value);
Node* HeapConstant(Handle<HeapObject> object);
+ Node* CStringConstant(const char* str);
Node* BooleanConstant(bool value);
Node* ExternalConstant(ExternalReference address);
Node* Float64Constant(double value);
@@ -227,16 +228,20 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool ToIntPtrConstant(Node* node, intptr_t& out_value);
Node* Parameter(int value);
+ Node* GetJSContextParameter();
void Return(Node* value);
+ void Return(Node* value1, Node* value2);
+ void Return(Node* value1, Node* value2, Node* value3);
void PopAndReturn(Node* pop, Node* value);
void DebugBreak();
+ void Unreachable();
void Comment(const char* format, ...);
void Bind(Label* label);
void Goto(Label* label);
void GotoIf(Node* condition, Label* true_label);
- void GotoUnless(Node* condition, Label* false_label);
+ void GotoIfNot(Node* condition, Label* false_label);
void Branch(Node* condition, Label* true_label, Label* false_label);
void Switch(Node* index, Label* default_label, const int32_t* case_values,
@@ -364,6 +369,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
args...);
}
+ Node* CallCFunctionN(Signature<MachineType>* signature, int input_count,
+ Node* const* inputs);
+
// Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
@@ -409,6 +417,8 @@ class CodeAssemblerVariable {
public:
explicit CodeAssemblerVariable(CodeAssembler* assembler,
MachineRepresentation rep);
+ CodeAssemblerVariable(CodeAssembler* assembler, MachineRepresentation rep,
+ Node* initial_value);
~CodeAssemblerVariable();
void Bind(Node* value);
Node* value() const;
@@ -444,7 +454,7 @@ class CodeAssemblerLabel {
CodeAssembler* assembler, CodeAssemblerVariable* merged_variable,
CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
: CodeAssemblerLabel(assembler, 1, &merged_variable, type) {}
- ~CodeAssemblerLabel() {}
+ ~CodeAssemblerLabel();
private:
friend class CodeAssembler;
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index 8bf3a9ea4e..bdedbecf26 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -67,6 +67,14 @@ class InstructionOperandConverter {
return static_cast<int16_t>(InputInt32(index));
}
+ uint8_t InputInt3(size_t index) {
+ return static_cast<uint8_t>(InputInt32(index) & 0x7);
+ }
+
+ uint8_t InputInt4(size_t index) {
+ return static_cast<uint8_t>(InputInt32(index) & 0xF);
+ }
+
uint8_t InputInt5(size_t index) {
return static_cast<uint8_t>(InputInt32(index) & 0x1F);
}
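
The new converters follow the existing InputInt5 pattern: truncate the 32-bit operand to its low bits (0x7 keeps three bits, 0xF keeps four). A self-contained version of the masking:

    #include <cassert>
    #include <cstdint>

    uint8_t InputInt3(int32_t raw) { return static_cast<uint8_t>(raw & 0x7); }
    uint8_t InputInt4(int32_t raw) { return static_cast<uint8_t>(raw & 0xF); }

    int main() {
      assert(InputInt3(0b101101) == 0b101);  // only the low three bits remain
      assert(InputInt4(-1) == 0xF);          // masking normalizes negatives
    }
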
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 7863476871..bbd9452c84 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -33,10 +33,8 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(
- Frame* frame, Linkage* linkage, InstructionSequence* code,
- CompilationInfo* info,
- ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions)
+CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
+ InstructionSequence* code, CompilationInfo* info)
: frame_access_state_(nullptr),
linkage_(linkage),
code_(code),
@@ -60,8 +58,7 @@ CodeGenerator::CodeGenerator(
osr_pc_offset_(-1),
optimized_out_literal_id_(-1),
source_position_table_builder_(code->zone(),
- info->SourcePositionRecordingMode()),
- protected_instructions_(protected_instructions) {
+ info->SourcePositionRecordingMode()) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -75,14 +72,6 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
}
-void CodeGenerator::AddProtectedInstruction(int instr_offset,
- int landing_offset) {
- if (protected_instructions_ != nullptr) {
- trap_handler::ProtectedInstructionData data = {instr_offset,
- landing_offset};
- protected_instructions_->emplace_back(data);
- }
-}
Handle<Code> CodeGenerator::GenerateCode() {
CompilationInfo* info = this->info();
@@ -207,8 +196,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
// Assemble all eager deoptimization exits.
for (DeoptimizationExit* exit : deoptimization_exits_) {
masm()->bind(exit->label());
- AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER,
- exit->pos());
+ AssembleDeoptimizerCall(exit->deoptimization_id(), exit->pos());
}
// Ensure there is space for lazy deoptimization in the code.
@@ -683,6 +671,13 @@ DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
return code()->GetDeoptimizationEntry(state_id);
}
+DeoptimizeKind CodeGenerator::GetDeoptimizationKind(
+ int deoptimization_id) const {
+ size_t const index = static_cast<size_t>(deoptimization_id);
+ DCHECK_LT(index, deoptimization_states_.size());
+ return deoptimization_states_[index]->kind();
+}
+
DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
int deoptimization_id) const {
size_t const index = static_cast<size_t>(deoptimization_id);
@@ -703,6 +698,10 @@ void CodeGenerator::TranslateStateValueDescriptor(
TranslateStateValueDescriptor(field.desc, field.nested, translation,
iter);
}
+ } else if (desc->IsArguments()) {
+ if (translation != nullptr) {
+ translation->BeginArgumentsObject(0);
+ }
} else if (desc->IsDuplicate()) {
if (translation != nullptr) {
translation->DuplicateObject(static_cast<int>(desc->id()));
@@ -808,8 +807,9 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
translation->BeginTailCallerFrame(shared_info_id);
break;
case FrameStateType::kConstructStub:
+ DCHECK(descriptor->bailout_id().IsValidForConstructStub());
translation->BeginConstructStubFrame(
- shared_info_id,
+ descriptor->bailout_id(), shared_info_id,
static_cast<unsigned int>(descriptor->parameters_count()));
break;
case FrameStateType::kGetterStub:
@@ -843,7 +843,7 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
int deoptimization_id = static_cast<int>(deoptimization_states_.size());
deoptimization_states_.push_back(new (zone()) DeoptimizationState(
- descriptor->bailout_id(), translation.index(), pc_offset,
+ descriptor->bailout_id(), translation.index(), pc_offset, entry.kind(),
entry.reason()));
return deoptimization_id;
@@ -863,16 +863,15 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
} else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
type == MachineType::Uint32()) {
translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
- } else if (IsAnyTagged(type.representation())) {
- translation->StoreStackSlot(LocationOperand::cast(op)->index());
} else {
- CHECK(false);
+ CHECK_EQ(MachineRepresentation::kTagged, type.representation());
+ translation->StoreStackSlot(LocationOperand::cast(op)->index());
}
} else if (op->IsFPStackSlot()) {
if (type.representation() == MachineRepresentation::kFloat64) {
translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
} else {
- DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+ CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
}
} else if (op->IsRegister()) {
@@ -885,27 +884,26 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
} else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
type == MachineType::Uint32()) {
translation->StoreUint32Register(converter.ToRegister(op));
- } else if (IsAnyTagged(type.representation())) {
- translation->StoreRegister(converter.ToRegister(op));
} else {
- CHECK(false);
+ CHECK_EQ(MachineRepresentation::kTagged, type.representation());
+ translation->StoreRegister(converter.ToRegister(op));
}
} else if (op->IsFPRegister()) {
InstructionOperandConverter converter(this, instr);
if (type.representation() == MachineRepresentation::kFloat64) {
translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
} else {
- DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+ CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
translation->StoreFloatRegister(converter.ToFloatRegister(op));
}
- } else if (op->IsImmediate()) {
+ } else {
+ CHECK(op->IsImmediate());
InstructionOperandConverter converter(this, instr);
Constant constant = converter.ToConstant(op);
Handle<Object> constant_object;
switch (constant.type()) {
case Constant::kInt32:
- if (type.representation() == MachineRepresentation::kTagged ||
- type.representation() == MachineRepresentation::kTaggedSigned) {
+ if (type.representation() == MachineRepresentation::kTagged) {
// When pointers are 4 bytes, we can use int32 constants to represent
// Smis.
DCHECK_EQ(4, kPointerSize);
@@ -928,9 +926,13 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
type.representation() == MachineRepresentation::kNone);
DCHECK(type.representation() != MachineRepresentation::kNone ||
constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
-
- constant_object =
- isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+ if (type == MachineType::Uint32()) {
+ constant_object =
+ isolate()->factory()->NewNumberFromUint(constant.ToInt32());
+ } else {
+ constant_object =
+ isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+ }
}
break;
case Constant::kInt64:
@@ -939,37 +941,28 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
// TODO(jarin,bmeurer): We currently pass in raw pointers to the
// JSFunction::entry here. We should really consider fixing this.
DCHECK(type.representation() == MachineRepresentation::kWord64 ||
- type.representation() == MachineRepresentation::kTagged ||
- type.representation() == MachineRepresentation::kTaggedSigned);
+ type.representation() == MachineRepresentation::kTagged);
DCHECK_EQ(8, kPointerSize);
constant_object =
handle(reinterpret_cast<Smi*>(constant.ToInt64()), isolate());
DCHECK(constant_object->IsSmi());
break;
case Constant::kFloat32:
- if (type.representation() == MachineRepresentation::kTaggedSigned) {
- DCHECK(IsSmiDouble(constant.ToFloat32()));
- } else {
- DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
- CanBeTaggedPointer(type.representation()));
- }
+ DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
+ type.representation() == MachineRepresentation::kTagged);
constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
break;
case Constant::kFloat64:
- if (type.representation() == MachineRepresentation::kTaggedSigned) {
- DCHECK(IsSmiDouble(constant.ToFloat64()));
- } else {
- DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
- CanBeTaggedPointer(type.representation()));
- }
+ DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
+ type.representation() == MachineRepresentation::kTagged);
constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
break;
case Constant::kHeapObject:
- DCHECK(CanBeTaggedPointer(type.representation()));
+ DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
constant_object = constant.ToHeapObject();
break;
default:
- CHECK(false);
+ UNREACHABLE();
}
if (constant_object.is_identical_to(info()->closure())) {
translation->StoreJSFrameFunction();
@@ -977,8 +970,6 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
int literal_id = DefineDeoptimizationLiteral(constant_object);
translation->StoreLiteral(literal_id);
}
- } else {
- CHECK(false);
}
}
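
An easy-to-miss behavioral fix in this hunk: a MachineType::Uint32() constant is now materialized via NewNumberFromUint, since the same 32-bit pattern denotes different numbers under signed and unsigned interpretation. A small demonstration of why the distinction matters:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t raw = -1;  // bit pattern 0xFFFFFFFF
      double as_signed = static_cast<double>(raw);
      double as_unsigned = static_cast<double>(static_cast<uint32_t>(raw));
      assert(as_signed == -1.0);
      assert(as_unsigned == 4294967295.0);  // what NewNumberFromUint keeps
    }
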
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index e20a8be774..74958d05f3 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -53,9 +53,7 @@ class InstructionOperandIterator {
class CodeGenerator final : public GapResolver::Assembler {
public:
explicit CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info,
- ZoneVector<trap_handler::ProtectedInstructionData>*
- protected_instructions = nullptr);
+ InstructionSequence* code, CompilationInfo* info);
// Generate native code.
Handle<Code> GenerateCode();
@@ -68,8 +66,6 @@ class CodeGenerator final : public GapResolver::Assembler {
Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
- void AddProtectedInstruction(int instr_offset, int landing_offset);
-
void AssembleSourcePosition(Instruction* instr);
void AssembleSourcePosition(SourcePosition source_position);
@@ -129,7 +125,6 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleArchTableSwitch(Instruction* instr);
CodeGenResult AssembleDeoptimizerCall(int deoptimization_id,
- Deoptimizer::BailoutType bailout_type,
SourcePosition pos);
// Generates an architecture-specific, descriptor-specific prologue
@@ -214,6 +209,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int DefineDeoptimizationLiteral(Handle<Object> literal);
DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
size_t frame_state_offset);
+ DeoptimizeKind GetDeoptimizationKind(int deoptimization_id) const;
DeoptimizeReason GetDeoptimizationReason(int deoptimization_id) const;
int BuildTranslation(Instruction* instr, int pc_offset,
size_t frame_state_offset,
@@ -242,21 +238,24 @@ class CodeGenerator final : public GapResolver::Assembler {
class DeoptimizationState final : public ZoneObject {
public:
DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset,
- DeoptimizeReason reason)
+ DeoptimizeKind kind, DeoptimizeReason reason)
: bailout_id_(bailout_id),
translation_id_(translation_id),
pc_offset_(pc_offset),
+ kind_(kind),
reason_(reason) {}
BailoutId bailout_id() const { return bailout_id_; }
int translation_id() const { return translation_id_; }
int pc_offset() const { return pc_offset_; }
+ DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
private:
BailoutId bailout_id_;
int translation_id_;
int pc_offset_;
+ DeoptimizeKind kind_;
DeoptimizeReason reason_;
};
@@ -291,7 +290,6 @@ class CodeGenerator final : public GapResolver::Assembler {
int osr_pc_offset_;
int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
- ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 85d49b7ae6..70fdf71578 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -126,7 +126,7 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
node->opcode() == IrOpcode::kDeoptimizeUnless);
bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
- DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
Node* condition = NodeProperties::GetValueInput(node, 0);
Node* frame_state = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -137,9 +137,10 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
// (as guaranteed by the graph reduction logic).
if (condition->opcode() == IrOpcode::kBooleanNot) {
NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
- NodeProperties::ChangeOp(node, condition_is_true
- ? common()->DeoptimizeIf(reason)
- : common()->DeoptimizeUnless(reason));
+ NodeProperties::ChangeOp(
+ node, condition_is_true
+ ? common()->DeoptimizeIf(p.kind(), p.reason())
+ : common()->DeoptimizeUnless(p.kind(), p.reason()));
return Changed(node);
}
Decision const decision = DecideCondition(condition);
@@ -147,9 +148,8 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition_is_true == (decision == Decision::kTrue)) {
ReplaceWithValue(node, dead(), effect, control);
} else {
- control =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
- frame_state, effect, control);
+ control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
@@ -284,55 +284,91 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
return Replace(value);
}
-
Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
DCHECK_EQ(IrOpcode::kReturn, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- bool changed = false;
if (effect->opcode() == IrOpcode::kCheckpoint) {
// Any {Return} node can never be used to insert a deoptimization point,
// hence checkpoints can be cut out of the effect chain flowing into it.
effect = NodeProperties::GetEffectInput(effect);
NodeProperties::ReplaceEffectInput(node, effect);
- changed = true;
+ Reduction const reduction = ReduceReturn(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
// TODO(ahaas): Extend the reduction below to multiple return values.
if (ValueInputCountOfReturn(node->op()) != 1) {
return NoChange();
}
- Node* const value = node->InputAt(1);
+ Node* pop_count = NodeProperties::GetValueInput(node, 0);
+ Node* value = NodeProperties::GetValueInput(node, 1);
+ Node* control = NodeProperties::GetControlInput(node);
if (value->opcode() == IrOpcode::kPhi &&
NodeProperties::GetControlInput(value) == control &&
- effect->opcode() == IrOpcode::kEffectPhi &&
- NodeProperties::GetControlInput(effect) == control &&
control->opcode() == IrOpcode::kMerge) {
+ // This optimization pushes {Return} nodes through merges. It checks that
+ // the return value is actually a {Phi} and the return control dependency
+ // is the {Merge} to which the {Phi} belongs.
+
+ // Value1 ... ValueN Control1 ... ControlN
+ // ^ ^ ^ ^
+ // | | | |
+ // +----+-----+ +------+-----+
+ // | |
+ // Phi --------------> Merge
+ // ^ ^
+ // | |
+ // | +-----------------+
+ // | |
+ // Return -----> Effect
+ // ^
+ // |
+ // End
+
+ // Now the effect input to the {Return} node can be either an {EffectPhi}
+ // hanging off the same {Merge}, or the {Merge} node is only connected to
+ // the {Return} and the {Phi}, in which case we know that the effect input
+ // must somehow dominate all merged branches.
+
Node::Inputs control_inputs = control->inputs();
Node::Inputs value_inputs = value->inputs();
- Node::Inputs effect_inputs = effect->inputs();
DCHECK_NE(0, control_inputs.count());
DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1);
- DCHECK_EQ(control_inputs.count(), effect_inputs.count() - 1);
DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
DCHECK_NE(0, graph()->end()->InputCount());
- for (int i = 0; i < control_inputs.count(); ++i) {
- // Create a new {Return} and connect it to {end}. We don't need to mark
- // {end} as revisit, because we mark {node} as {Dead} below, which was
- // previously connected to {end}, so we know for sure that at some point
- // the reducer logic will visit {end} again.
- Node* ret = graph()->NewNode(common()->Return(), node->InputAt(0),
- value_inputs[i], effect_inputs[i],
- control_inputs[i]);
- NodeProperties::MergeControlToEnd(graph(), common(), ret);
+ if (control->OwnedBy(node, value)) {
+ for (int i = 0; i < control_inputs.count(); ++i) {
+ // Create a new {Return} and connect it to {end}. We don't need to mark
+ // {end} as revisit, because we mark {node} as {Dead} below, which was
+ // previously connected to {end}, so we know for sure that at some point
+ // the reducer logic will visit {end} again.
+ Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i],
+ effect, control_inputs[i]);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+ }
+ // Mark the Merge {control} and Return {node} as {dead}.
+ Replace(control, dead());
+ return Replace(dead());
+ } else if (effect->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(effect) == control) {
+ Node::Inputs effect_inputs = effect->inputs();
+ DCHECK_EQ(control_inputs.count(), effect_inputs.count() - 1);
+ for (int i = 0; i < control_inputs.count(); ++i) {
+ // Create a new {Return} and connect it to {end}. We don't need to mark
+ // {end} as revisit, because we mark {node} as {Dead} below, which was
+ // previously connected to {end}, so we know for sure that at some point
+ // the reducer logic will visit {end} again.
+ Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i],
+ effect_inputs[i], control_inputs[i]);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+ }
+ // Mark the Merge {control} and Return {node} as {dead}.
+ Replace(control, dead());
+ return Replace(dead());
}
- // Mark the merge {control} and return {node} as {dead}.
- Replace(control, dead());
- return Replace(dead());
}
- return changed ? Changed(node) : NoChange();
+ return NoChange();
}
-
Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
DCHECK_EQ(IrOpcode::kSelect, node->opcode());
Node* const cond = node->InputAt(0);
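
The rewritten ReduceReturn handles two graph shapes: either the effect input is an {EffectPhi} on the same {Merge}, so each duplicated {Return} takes the matching per-branch effect, or the {Merge} is owned solely by the {Return} and the {Phi}, in which case the single effect already dominates every branch and is reused. The per-branch duplication, sketched with strings for nodes:

    #include <cstddef>
    #include <string>
    #include <vector>

    struct Ret {
      std::string value, effect, control;
    };

    // One Return per merged predecessor. effect_phi_inputs is null when no
    // EffectPhi exists and one dominating effect is shared by all branches.
    std::vector<Ret> PushReturnThroughMerge(
        const std::vector<std::string>& phi_values,
        const std::vector<std::string>& merge_controls,
        const std::vector<std::string>* effect_phi_inputs,
        const std::string& shared_effect) {
      std::vector<Ret> rets;
      for (std::size_t i = 0; i < merge_controls.size(); ++i) {
        const std::string& effect =
            effect_phi_inputs ? (*effect_phi_inputs)[i] : shared_effect;
        rets.push_back({phi_values[i], effect, merge_controls[i]});
      }
      return rets;
    }

    int main() {
      auto rets =
          PushReturnThroughMerge({"v1", "v2"}, {"c1", "c2"}, nullptr, "e");
      return rets.size() == 2 ? 0 : 1;
    }
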
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 2cd63314cf..637b0646b5 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -37,12 +37,6 @@ BranchHint BranchHintOf(const Operator* const op) {
return OpParameter<BranchHint>(op);
}
-DeoptimizeReason DeoptimizeReasonOf(Operator const* const op) {
- DCHECK(op->opcode() == IrOpcode::kDeoptimizeIf ||
- op->opcode() == IrOpcode::kDeoptimizeUnless);
- return OpParameter<DeoptimizeReason>(op);
-}
-
int ValueInputCountOfReturn(Operator const* const op) {
DCHECK(op->opcode() == IrOpcode::kReturn);
// Return nodes have a hidden input at index 0 which we ignore in the value
@@ -50,19 +44,6 @@ int ValueInputCountOfReturn(Operator const* const op) {
return op->ValueInputCount() - 1;
}
-size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
-
-std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
- switch (kind) {
- case DeoptimizeKind::kEager:
- return os << "Eager";
- case DeoptimizeKind::kSoft:
- return os << "Soft";
- }
- UNREACHABLE();
- return os;
-}
-
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason();
}
@@ -80,7 +61,9 @@ std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
- DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
+ DCHECK(op->opcode() == IrOpcode::kDeoptimize ||
+ op->opcode() == IrOpcode::kDeoptimizeIf ||
+ op->opcode() == IrOpcode::kDeoptimizeUnless);
return OpParameter<DeoptimizeParameters>(op);
}
@@ -436,22 +419,22 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
#define CACHED_DEOPTIMIZE_IF_LIST(V) \
- V(DivisionByZero) \
- V(Hole) \
- V(MinusZero) \
- V(Overflow) \
- V(Smi)
+ V(Eager, DivisionByZero) \
+ V(Eager, Hole) \
+ V(Eager, MinusZero) \
+ V(Eager, Overflow) \
+ V(Eager, Smi)
#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
- V(LostPrecision) \
- V(LostPrecisionOrNaN) \
- V(NoReason) \
- V(NotAHeapNumber) \
- V(NotANumberOrOddball) \
- V(NotASmi) \
- V(OutOfBounds) \
- V(WrongInstanceType) \
- V(WrongMap)
+ V(Eager, LostPrecision) \
+ V(Eager, LostPrecisionOrNaN) \
+ V(Eager, NoReason) \
+ V(Eager, NotAHeapNumber) \
+ V(Eager, NotANumberOrOddball) \
+ V(Eager, NotASmi) \
+ V(Eager, OutOfBounds) \
+ V(Eager, WrongInstanceType) \
+ V(Eager, WrongMap)
#define CACHED_TRAP_IF_LIST(V) \
V(TrapDivUnrepresentable) \
@@ -635,35 +618,37 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
- template <DeoptimizeReason kReason>
- struct DeoptimizeIfOperator final : public Operator1<DeoptimizeReason> {
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+ struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
DeoptimizeIfOperator()
- : Operator1<DeoptimizeReason>( // --
+ : Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- kReason) {} // parameter
+ DeoptimizeParameters(kKind, kReason)) {} // parameter
};
-#define CACHED_DEOPTIMIZE_IF(Reason) \
- DeoptimizeIfOperator<DeoptimizeReason::k##Reason> \
- kDeoptimizeIf##Reason##Operator;
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
+ kDeoptimizeIf##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
- template <DeoptimizeReason kReason>
- struct DeoptimizeUnlessOperator final : public Operator1<DeoptimizeReason> {
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+ struct DeoptimizeUnlessOperator final
+ : public Operator1<DeoptimizeParameters> {
DeoptimizeUnlessOperator()
- : Operator1<DeoptimizeReason>( // --
+ : Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- kReason) {} // parameter
+ DeoptimizeParameters(kKind, kReason)) {} // parameter
};
-#define CACHED_DEOPTIMIZE_UNLESS(Reason) \
- DeoptimizeUnlessOperator<DeoptimizeReason::k##Reason> \
- kDeoptimizeUnless##Reason##Operator;
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+ DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
+ DeoptimizeReason::k##Reason> \
+ kDeoptimizeUnless##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
@@ -677,8 +662,8 @@ struct CommonOperatorGlobalCache final {
1, 1, 1, 0, 0, 1, // counts
trap_id) {} // parameter
};
-#define CACHED_TRAP_IF(Trap) \
- TrapIfOperator<static_cast<int32_t>(Runtime::kThrowWasm##Trap)> \
+#define CACHED_TRAP_IF(Trap) \
+ TrapIfOperator<static_cast<int32_t>(Builtins::kThrowWasm##Trap)> \
kTrapIf##Trap##Operator;
CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
#undef CACHED_TRAP_IF
@@ -693,8 +678,8 @@ struct CommonOperatorGlobalCache final {
1, 1, 1, 0, 0, 1, // counts
trap_id) {} // parameter
};
-#define CACHED_TRAP_UNLESS(Trap) \
- TrapUnlessOperator<static_cast<int32_t>(Runtime::kThrowWasm##Trap)> \
+#define CACHED_TRAP_UNLESS(Trap) \
+ TrapUnlessOperator<static_cast<int32_t>(Builtins::kThrowWasm##Trap)> \
kTrapUnless##Trap##Operator;
CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
#undef CACHED_TRAP_UNLESS
@@ -859,49 +844,48 @@ const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
parameter); // parameter
}
-const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeReason reason) {
- switch (reason) {
-#define CACHED_DEOPTIMIZE_IF(Reason) \
- case DeoptimizeReason::k##Reason: \
- return &cache_.kDeoptimizeIf##Reason##Operator;
- CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
-#undef CACHED_DEOPTIMIZE_IF
- default:
- break;
+const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeKind kind,
+ DeoptimizeReason reason) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason) { \
+ return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
}
+ CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
+#undef CACHED_DEOPTIMIZE_IF
// Uncached
- return new (zone()) Operator1<DeoptimizeReason>( // --
- IrOpcode::kDeoptimizeIf, // opcode
- Operator::kFoldable | Operator::kNoThrow, // properties
- "DeoptimizeIf", // name
- 2, 1, 1, 0, 1, 1, // counts
- reason); // parameter
+ DeoptimizeParameters parameter(kind, reason);
+ return new (zone()) Operator1<DeoptimizeParameters>( // --
+ IrOpcode::kDeoptimizeIf, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "DeoptimizeIf", // name
+ 2, 1, 1, 0, 1, 1, // counts
+ parameter); // parameter
}
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
- DeoptimizeReason reason) {
- switch (reason) {
-#define CACHED_DEOPTIMIZE_UNLESS(Reason) \
- case DeoptimizeReason::k##Reason: \
- return &cache_.kDeoptimizeUnless##Reason##Operator;
- CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
-#undef CACHED_DEOPTIMIZE_UNLESS
- default:
- break;
+ DeoptimizeKind kind, DeoptimizeReason reason) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason) { \
+ return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
}
+ CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
+#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- return new (zone()) Operator1<DeoptimizeReason>( // --
- IrOpcode::kDeoptimizeUnless, // opcode
- Operator::kFoldable | Operator::kNoThrow, // properties
- "DeoptimizeUnless", // name
- 2, 1, 1, 0, 1, 1, // counts
- reason); // parameter
+ DeoptimizeParameters parameter(kind, reason);
+ return new (zone()) Operator1<DeoptimizeParameters>( // --
+ IrOpcode::kDeoptimizeUnless, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "DeoptimizeUnless", // name
+ 2, 1, 1, 0, 1, 1, // counts
+ parameter); // parameter
}
const Operator* CommonOperatorBuilder::TrapIf(int32_t trap_id) {
switch (trap_id) {
-#define CACHED_TRAP_IF(Trap) \
- case Runtime::kThrowWasm##Trap: \
+#define CACHED_TRAP_IF(Trap) \
+ case Builtins::kThrowWasm##Trap: \
return &cache_.kTrapIf##Trap##Operator;
CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
#undef CACHED_TRAP_IF
@@ -919,8 +903,8 @@ const Operator* CommonOperatorBuilder::TrapIf(int32_t trap_id) {
const Operator* CommonOperatorBuilder::TrapUnless(int32_t trap_id) {
switch (trap_id) {
-#define CACHED_TRAP_UNLESS(Trap) \
- case Runtime::kThrowWasm##Trap: \
+#define CACHED_TRAP_UNLESS(Trap) \
+ case Builtins::kThrowWasm##Trap: \
return &cache_.kTrapUnless##Trap##Operator;
CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
#undef CACHED_TRAP_UNLESS
@@ -1248,6 +1232,13 @@ const Operator* CommonOperatorBuilder::TypedStateValues(
TypedStateValueInfo(types, bitmask)); // parameters
}
+const Operator* CommonOperatorBuilder::ArgumentsObjectState() {
+ return new (zone()) Operator( // --
+ IrOpcode::kArgumentsObjectState, Operator::kPure, // opcode
+ "ArgumentsObjectState", // name
+ 0, 0, 0, 1, 0, 0); // counts
+}
+
const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots) {
return new (zone()) Operator1<int>( // --
IrOpcode::kObjectState, Operator::kPure, // opcode
@@ -1353,44 +1344,6 @@ const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
}
}
-const Operator* CommonOperatorBuilder::Int32x4ExtractLane(int32_t lane_number) {
- DCHECK(0 <= lane_number && lane_number < 4);
- return new (zone()) Operator1<int32_t>( // --
- IrOpcode::kInt32x4ExtractLane, Operator::kPure, // opcode
- "Int32x4ExtractLane", // name
- 1, 0, 0, 1, 0, 0, // counts
- lane_number); // parameter
-}
-
-const Operator* CommonOperatorBuilder::Int32x4ReplaceLane(int32_t lane_number) {
- DCHECK(0 <= lane_number && lane_number < 4);
- return new (zone()) Operator1<int32_t>( // --
- IrOpcode::kInt32x4ReplaceLane, Operator::kPure, // opcode
- "Int32x4ReplaceLane", // name
- 2, 0, 0, 1, 0, 0, // counts
- lane_number); // parameter
-}
-
-const Operator* CommonOperatorBuilder::Float32x4ExtractLane(
- int32_t lane_number) {
- DCHECK(0 <= lane_number && lane_number < 4);
- return new (zone()) Operator1<int32_t>( // --
- IrOpcode::kFloat32x4ExtractLane, Operator::kPure, // opcode
- "Float32x4ExtractLane", // name
- 1, 0, 0, 1, 0, 0, // counts
- lane_number); // parameter
-}
-
-const Operator* CommonOperatorBuilder::Float32x4ReplaceLane(
- int32_t lane_number) {
- DCHECK(0 <= lane_number && lane_number < 4);
- return new (zone()) Operator1<int32_t>( // --
- IrOpcode::kFloat32x4ReplaceLane, Operator::kPure, // opcode
- "Float32x4ReplaceLane", // name
- 2, 0, 0, 1, 0, 0, // counts
- lane_number); // parameter
-}
-
const FrameStateFunctionInfo*
CommonOperatorBuilder::CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
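To make the new cache lookup concrete: for the (Eager, DivisionByZero) entry of CACHED_DEOPTIMIZE_IF_LIST, the CACHED_DEOPTIMIZE_IF macro above expands the body of CommonOperatorBuilder::DeoptimizeIf to roughly this (whitespace adjusted):

    if (kind == DeoptimizeKind::kEager &&
        reason == DeoptimizeReason::kDivisionByZero) {
      return &cache_.kDeoptimizeIfEagerDivisionByZeroOperator;
    }

Any (kind, reason) pair not in the cached list falls through to the zone-allocated Operator1<DeoptimizeParameters> below.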
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 5d0a6df31d..46829593a4 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -46,19 +46,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint);
V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const);
-// Deoptimize reason for Deoptimize, DeoptimizeIf and DeoptimizeUnless.
-DeoptimizeReason DeoptimizeReasonOf(Operator const* const);
-
// Helper function for return nodes, because returns have a hidden value input.
int ValueInputCountOfReturn(Operator const* const op);
-// Deoptimize bailout kind.
-enum class DeoptimizeKind : uint8_t { kEager, kSoft };
-
-size_t hash_value(DeoptimizeKind kind);
-
-std::ostream& operator<<(std::ostream&, DeoptimizeKind);
-
// Parameters for the {Deoptimize} operator.
class DeoptimizeParameters final {
public:
@@ -326,8 +316,9 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* IfDefault();
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
- const Operator* DeoptimizeIf(DeoptimizeReason reason);
- const Operator* DeoptimizeUnless(DeoptimizeReason reason);
+ const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason);
+ const Operator* DeoptimizeUnless(DeoptimizeKind kind,
+ DeoptimizeReason reason);
const Operator* TrapIf(int32_t trap_id);
const Operator* TrapUnless(int32_t trap_id);
const Operator* Return(int value_input_count = 1);
@@ -371,6 +362,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* StateValues(int arguments, SparseInputMask bitmask);
const Operator* TypedStateValues(const ZoneVector<MachineType>* types,
SparseInputMask bitmask);
+ const Operator* ArgumentsObjectState();
const Operator* ObjectState(int pointer_slots);
const Operator* TypedObjectState(const ZoneVector<MachineType>* types);
const Operator* FrameState(BailoutId bailout_id,
@@ -386,12 +378,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
// with {size} inputs.
const Operator* ResizeMergeOrPhi(const Operator* op, int size);
- // Simd Operators
- const Operator* Int32x4ExtractLane(int32_t);
- const Operator* Int32x4ReplaceLane(int32_t);
- const Operator* Float32x4ExtractLane(int32_t);
- const Operator* Float32x4ReplaceLane(int32_t);
-
// Constructs function info for frame state construction.
const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
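Call sites migrate from the one-argument to the two-argument form accordingly; a minimal sketch (the surrounding graph-building variables are assumed for illustration, not taken from this patch):

    // Before: common->DeoptimizeIf(DeoptimizeReason::kHole)
    Node* deopt = graph->NewNode(
        common->DeoptimizeIf(DeoptimizeKind::kEager, DeoptimizeReason::kHole),
        condition, frame_state, effect, control);

The graph-assembler change further down keeps the old one-argument helpers as thin wrappers that default the kind to kEager.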
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index b88906cfc1..865e909ad8 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -13,6 +13,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -633,6 +634,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeTaggedToFloat64:
result = LowerChangeTaggedToFloat64(node);
break;
+ case IrOpcode::kChangeTaggedToTaggedSigned:
+ result = LowerChangeTaggedToTaggedSigned(node);
+ break;
case IrOpcode::kTruncateTaggedToBit:
result = LowerTruncateTaggedToBit(node);
break;
@@ -648,6 +652,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckNumber:
result = LowerCheckNumber(node, frame_state);
break;
+ case IrOpcode::kCheckReceiver:
+ result = LowerCheckReceiver(node, frame_state);
+ break;
case IrOpcode::kCheckString:
result = LowerCheckString(node, frame_state);
break;
@@ -711,8 +718,11 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTruncateTaggedToWord32:
result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
break;
- case IrOpcode::kObjectIsCallable:
- result = LowerObjectIsCallable(node);
+ case IrOpcode::kObjectIsDetectableCallable:
+ result = LowerObjectIsDetectableCallable(node);
+ break;
+ case IrOpcode::kObjectIsNonCallable:
+ result = LowerObjectIsNonCallable(node);
break;
case IrOpcode::kObjectIsNumber:
result = LowerObjectIsNumber(node);
@@ -744,6 +754,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringFromCodePoint:
result = LowerStringFromCodePoint(node);
break;
+ case IrOpcode::kStringIndexOf:
+ result = LowerStringIndexOf(node);
+ break;
case IrOpcode::kStringCharAt:
result = LowerStringCharAt(node);
break;
@@ -910,70 +923,56 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel<1>();
- auto if_not_oddball = __ MakeDeferredLabel<1>();
- auto if_not_string = __ MakeDeferredLabel<1>();
- auto if_not_heapnumber = __ MakeDeferredLabel<1>();
- auto done = __ MakeLabel<5>(MachineRepresentation::kBit);
+ auto if_heapnumber = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<6>(MachineRepresentation::kBit);
Node* zero = __ Int32Constant(0);
Node* fzero = __ Float64Constant(0.0);
+ // Check if {value} is false.
+ __ GotoIf(__ WordEqual(value, __ FalseConstant()), &done, zero);
+
// Check if {value} is a Smi.
Node* check_smi = ObjectIsSmi(value);
__ GotoIf(check_smi, &if_smi);
- // Load the map instance type of {value}.
+ // Check if {value} is the empty string.
+ __ GotoIf(__ WordEqual(value, __ EmptyStringConstant()), &done, zero);
+
+ // Load the map of {value}.
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* value_instance_type =
- __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- // Check if {value} is an Oddball.
- Node* check_oddball =
- __ Word32Equal(value_instance_type, __ Int32Constant(ODDBALL_TYPE));
-
- __ GotoUnless(check_oddball, &if_not_oddball);
- // The only Oddball {value} that is trueish is true itself.
- __ Goto(&done, __ WordEqual(value, __ TrueConstant()));
-
- __ Bind(&if_not_oddball);
- // Check if {value} is a String.
- Node* check_string = __ Int32LessThan(value_instance_type,
- __ Int32Constant(FIRST_NONSTRING_TYPE));
- __ GotoUnless(check_string, &if_not_string);
- // For String {value}, we need to check that the length is not zero.
- Node* value_length = __ LoadField(AccessBuilder::ForStringLength(), value);
- __ Goto(&done, __ Word32Equal(
- __ WordEqual(value_length, __ IntPtrConstant(0)), zero));
-
- __ Bind(&if_not_string);
- // Check if {value} is a HeapNumber.
- Node* check_heapnumber =
- __ Word32Equal(value_instance_type, __ Int32Constant(HEAP_NUMBER_TYPE));
- __ GotoUnless(check_heapnumber, &if_not_heapnumber);
-
- // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
- // NaN.
- // Load the raw value of {value}.
- Node* value_value = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
- __ Goto(&done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
-
- // The {value} is either a JSReceiver, a Symbol or some Simd128Value. In
- // those cases we can just check the undetectable bit on the map, which
- // will only be set for certain JSReceivers, i.e. document.all.
- __ Bind(&if_not_heapnumber);
-
- // Load the {value} map bit field.
+ // Check if the {value} is undetectable and immediately return false.
Node* value_map_bitfield =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
- __ Goto(&done, __ Word32Equal(
- __ Word32And(value_map_bitfield,
+ __ GotoUnless(
+ __ Word32Equal(__ Word32And(value_map_bitfield,
__ Int32Constant(1 << Map::kIsUndetectable)),
- zero));
+ zero),
+ &done, zero);
+
+ // Check if {value} is a HeapNumber.
+ __ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
+ &if_heapnumber);
+
+ // All other values that reach here are true.
+ __ Goto(&done, __ Int32Constant(1));
+
+ __ Bind(&if_heapnumber);
+ {
+ // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
+ // NaN.
+ Node* value_value =
+ __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ __ Goto(&done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
+ }
__ Bind(&if_smi);
- // If {value} is a Smi, then we only need to check that it's not zero.
- __ Goto(&done,
- __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)), zero));
+ {
+ // If {value} is a Smi, then we only need to check that it's not zero.
+ __ Goto(&done,
+ __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)), zero));
+ }
__ Bind(&done);
return done.PhiAt(0);
@@ -1023,6 +1022,27 @@ Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
return LowerTruncateTaggedToFloat64(node);
}
+Node* EffectControlLinearizer::LowerChangeTaggedToTaggedSigned(Node* node) {
+ Node* value = node->InputAt(0);
+
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
+ __ Goto(&done, value);
+
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = __ ChangeFloat64ToInt32(vfalse);
+ vfalse = ChangeInt32ToSmi(vfalse);
+ __ Goto(&done, vfalse);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
Node* value = node->InputAt(0);
@@ -1083,6 +1103,15 @@ Node* EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
// Perform the (deferred) instance migration.
__ Bind(&migrate);
{
+ // If the map is not deprecated, the migration attempt does not make sense.
+ Node* bitfield3 =
+ __ LoadField(AccessBuilder::ForMapBitField3(), value_map);
+ Node* if_not_deprecated = __ WordEqual(
+ __ Word32And(bitfield3, __ Int32Constant(Map::Deprecated::kMask)),
+ __ Int32Constant(0));
+ __ DeoptimizeIf(DeoptimizeReason::kWrongMap, if_not_deprecated,
+ frame_state);
+
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
@@ -1154,47 +1183,56 @@ Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
return value;
}
-Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
+Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
+ Node* frame_state) {
Node* value = node->InputAt(0);
- Node* check0 = ObjectIsSmi(value);
- __ DeoptimizeIf(DeoptimizeReason::kSmi, check0, frame_state);
-
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- Node* check1 = __ Uint32LessThan(value_instance_type,
- __ Uint32Constant(FIRST_NONSTRING_TYPE));
- __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check1,
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Node* check = __ Uint32LessThanOrEqual(
+ __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+ __ DeoptimizeUnless(DeoptimizeReason::kNotAJavaScriptObject, check,
frame_state);
return value;
}
+Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+
+ Node* check = __ Uint32LessThan(value_instance_type,
+ __ Uint32Constant(FIRST_NONSTRING_TYPE));
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+ return value;
+}
+
Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
- Node* check0 = ObjectIsSmi(value);
- __ DeoptimizeIf(DeoptimizeReason::kSmi, check0, frame_state);
-
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- Node* check1 = __ Word32Equal(
+ Node* check = __ Word32Equal(
__ Word32And(value_instance_type,
__ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
__ Int32Constant(kInternalizedTag));
- __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check1,
- frame_state);
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check, frame_state);
return value;
}
Node* EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- __ DeoptimizeUnless(DeoptimizeReason::kNoReason, value, frame_state);
+ __ DeoptimizeUnless(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason,
+ value, frame_state);
return value;
}
@@ -1651,7 +1689,7 @@ Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
+Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel<1>();
@@ -1677,6 +1715,37 @@ Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
+ Node* value = node->InputAt(0);
+
+ auto if_primitive = __ MakeDeferredLabel<2>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
+ Node* check0 = ObjectIsSmi(value);
+ __ GotoIf(check0, &if_primitive);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Node* check1 = __ Uint32LessThanOrEqual(
+ __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+ __ GotoUnless(check1, &if_primitive);
+
+ Node* value_bit_field =
+ __ LoadField(AccessBuilder::ForMapBitField(), value_map);
+ Node* check2 = __ Word32Equal(
+ __ Int32Constant(0),
+ __ Word32And(value_bit_field, __ Int32Constant(1 << Map::kIsCallable)));
+ __ Goto(&done, check2);
+
+ __ Bind(&if_primitive);
+ __ Goto(&done, __ Int32Constant(0));
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
Node* value = node->InputAt(0);
@@ -2011,6 +2080,20 @@ Node* EffectControlLinearizer::LowerStringFromCodePoint(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
+ Node* subject = node->InputAt(0);
+ Node* search_string = node->InputAt(1);
+ Node* position = node->InputAt(2);
+
+ Callable callable = CodeFactory::StringIndexOf(isolate());
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), subject, search_string,
+ position, __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
Node* node) {
Node* lhs = node->InputAt(0);
@@ -2217,8 +2300,8 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
auto done_grow = __ MakeLabel<2>(MachineRepresentation::kTagged);
+ auto if_grow = __ MakeDeferredLabel<1>();
auto if_not_grow = __ MakeLabel<1>();
- auto if_not_grow_backing_store = __ MakeLabel<1>();
Node* check0 = (flags & GrowFastElementsFlag::kHoleyElements)
? __ Uint32LessThanOrEqual(length, index)
@@ -2232,10 +2315,10 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
// Check if we need to grow the {elements} backing store.
Node* check1 = __ Uint32LessThan(index, elements_length);
- __ GotoUnless(check1, &if_not_grow_backing_store);
+ __ GotoUnless(check1, &if_grow);
__ Goto(&done_grow, elements);
- __ Bind(&if_not_grow_backing_store);
+ __ Bind(&if_grow);
// We need to grow the {elements} for {object}.
Operator::Properties properties = Operator::kEliminatable;
Callable callable =
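The rewritten LowerTruncateTaggedToBit above encodes JavaScript ToBoolean as a short-circuit ladder of cheap identity and map checks. As pseudocode, the lowered logic is roughly (a sketch of the semantics, not the emitted graph):

    // ToBoolean(value), as lowered above:
    //   if (value == false)          return false;  // canonical false oddball
    //   if (IsSmi(value))            return value != 0;
    //   if (value == empty_string)   return false;
    //   map = value->map();
    //   if (map->is_undetectable())  return false;  // e.g. document.all
    //   if (map == heap_number_map)  return 0.0 < fabs(value->number());
    //   return true;                 // all remaining values are truthy

Note that true, non-empty strings, and ordinary receivers all fall through to the final truthy arm, which is why the old per-instance-type dispatch could be dropped.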
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 9d991cfb4b..016d6025c1 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -51,10 +51,12 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerChangeTaggedToBit(Node* node);
Node* LowerChangeTaggedToInt32(Node* node);
Node* LowerChangeTaggedToUint32(Node* node);
+ Node* LowerChangeTaggedToTaggedSigned(Node* node);
Node* LowerCheckBounds(Node* node, Node* frame_state);
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
Node* LowerCheckMaps(Node* node, Node* frame_state);
Node* LowerCheckNumber(Node* node, Node* frame_state);
+ Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
Node* LowerCheckIf(Node* node, Node* frame_state);
Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
@@ -78,7 +80,8 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerTruncateTaggedToFloat64(Node* node);
Node* LowerTruncateTaggedToWord32(Node* node);
Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
- Node* LowerObjectIsCallable(Node* node);
+ Node* LowerObjectIsDetectableCallable(Node* node);
+ Node* LowerObjectIsNonCallable(Node* node);
Node* LowerObjectIsNumber(Node* node);
Node* LowerObjectIsReceiver(Node* node);
Node* LowerObjectIsSmi(Node* node);
@@ -91,6 +94,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerStringCharCodeAt(Node* node);
Node* LowerStringFromCharCode(Node* node);
Node* LowerStringFromCodePoint(Node* node);
+ Node* LowerStringIndexOf(Node* node);
Node* LowerStringEqual(Node* node);
Node* LowerStringLessThan(Node* node);
Node* LowerStringLessThanOrEqual(Node* node);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 10b7f285a6..c05092e06e 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -139,7 +139,6 @@ Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
if (escape_analysis()->IsVirtual(
SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
if (Node* rep = escape_analysis()->GetReplacement(node)) {
- isolate()->counters()->turbo_escape_loads_replaced()->Increment();
TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
rep = MaybeGuard(jsgraph(), zone(), node, rep);
@@ -175,7 +174,6 @@ Reduction EscapeAnalysisReducer::ReduceAllocate(Node* node) {
}
if (escape_analysis()->IsVirtual(node)) {
RelaxEffectsAndControls(node);
- isolate()->counters()->turbo_escape_allocs_replaced()->Increment();
TRACE("Removed allocate #%d from effect chain\n", node->id());
return Changed(node);
}
@@ -382,8 +380,6 @@ void EscapeAnalysisReducer::VerifyReplacement() const {
#endif // DEBUG
}
-Isolate* EscapeAnalysisReducer::isolate() const { return jsgraph_->isolate(); }
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 746d84030e..01c2ae118b 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -49,7 +49,6 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
JSGraph* jsgraph() const { return jsgraph_; }
EscapeAnalysis* escape_analysis() const { return escape_analysis_; }
Zone* zone() const { return zone_; }
- Isolate* isolate() const;
JSGraph* const jsgraph_;
EscapeAnalysis* escape_analysis_;
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 52c7e74c10..255e74eac1 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -694,6 +694,15 @@ void EscapeStatusAnalysis::Process(Node* node) {
RevisitInputs(rep);
RevisitUses(rep);
}
+ } else {
+ Node* from = NodeProperties::GetValueInput(node, 0);
+ from = object_analysis_->ResolveReplacement(from);
+ if (SetEscaped(from)) {
+ TRACE("Setting #%d (%s) to escaped because of unresolved load #%i\n",
+ from->id(), from->op()->mnemonic(), node->id());
+ RevisitInputs(from);
+ RevisitUses(from);
+ }
}
RevisitUses(node);
break;
@@ -828,7 +837,9 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kPlainPrimitiveToFloat64:
case IrOpcode::kStringCharAt:
case IrOpcode::kStringCharCodeAt:
- case IrOpcode::kObjectIsCallable:
+ case IrOpcode::kStringIndexOf:
+ case IrOpcode::kObjectIsDetectableCallable:
+ case IrOpcode::kObjectIsNonCallable:
case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsString:
@@ -844,9 +855,9 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
if (use->op()->EffectInputCount() == 0 &&
uses->op()->EffectInputCount() > 0 &&
!IrOpcode::IsJsOpcode(use->opcode())) {
- TRACE("Encountered unaccounted use by #%d (%s)\n", use->id(),
- use->op()->mnemonic());
- UNREACHABLE();
+ V8_Fatal(__FILE__, __LINE__,
+ "Encountered unaccounted use by #%d (%s)\n", use->id(),
+ use->op()->mnemonic());
}
if (SetEscaped(rep)) {
TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
@@ -867,6 +878,7 @@ void EscapeStatusAnalysis::ProcessFinishRegion(Node* node) {
}
if (CheckUsesForEscape(node, true)) {
RevisitInputs(node);
+ RevisitUses(node);
}
}
@@ -896,7 +908,7 @@ EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
EscapeAnalysis::~EscapeAnalysis() {}
-void EscapeAnalysis::Run() {
+bool EscapeAnalysis::Run() {
replacements_.resize(graph()->NodeCount());
status_analysis_->AssignAliases();
if (status_analysis_->AliasCount() > 0) {
@@ -905,6 +917,9 @@ void EscapeAnalysis::Run() {
status_analysis_->ResizeStatusVector();
RunObjectAnalysis();
status_analysis_->RunStatusAnalysis();
+ return true;
+ } else {
+ return false;
}
}
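EscapeAnalysis::Run now reports whether it found any aliases at all, so a caller can skip the comparatively expensive reduction phase when there is nothing to replace. A hypothetical caller-side sketch, assuming the usual pipeline wiring (the reducer constructor shown here is not part of this patch):

    EscapeAnalysis escape_analysis(graph, common, temp_zone);
    if (escape_analysis.Run()) {
      EscapeAnalysisReducer reducer(&graph_reducer, jsgraph, &escape_analysis,
                                    temp_zone);
      // ... run the reducer only when candidates exist.
    }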
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index 34960dde83..52edc4be0b 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -26,9 +26,10 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
~EscapeAnalysis();
- void Run();
+ bool Run();
Node* GetReplacement(Node* node);
+ Node* ResolveReplacement(Node* node);
bool IsVirtual(Node* node);
bool IsEscaped(Node* node);
bool CompareVirtualObjects(Node* left, Node* right);
@@ -59,7 +60,6 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
Node* node);
Node* replacement(Node* node);
- Node* ResolveReplacement(Node* node);
bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
VirtualObject* GetVirtualObject(VirtualState* state, Node* node);
diff --git a/deps/v8/src/compiler/frame-elider.cc b/deps/v8/src/compiler/frame-elider.cc
index dd8db83dd5..35d292b4e3 100644
--- a/deps/v8/src/compiler/frame-elider.cc
+++ b/deps/v8/src/compiler/frame-elider.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
#include "src/compiler/frame-elider.h"
+#include "src/base/adapters.h"
+
namespace v8 {
namespace internal {
namespace compiler {
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 235826e746..dbeff87ee0 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -6,6 +6,7 @@
#include "src/code-factory.h"
#include "src/compiler/linkage.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -157,16 +158,23 @@ Node* GraphAssembler::ToNumber(Node* value) {
Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, Node* condition,
Node* frame_state) {
- return current_control_ = current_effect_ =
- graph()->NewNode(common()->DeoptimizeIf(reason), condition,
- frame_state, current_effect_, current_control_);
+ return current_control_ = current_effect_ = graph()->NewNode(
+ common()->DeoptimizeIf(DeoptimizeKind::kEager, reason), condition,
+ frame_state, current_effect_, current_control_);
+}
+
+Node* GraphAssembler::DeoptimizeUnless(DeoptimizeKind kind,
+ DeoptimizeReason reason, Node* condition,
+ Node* frame_state) {
+ return current_control_ = current_effect_ = graph()->NewNode(
+ common()->DeoptimizeUnless(kind, reason), condition, frame_state,
+ current_effect_, current_control_);
}
Node* GraphAssembler::DeoptimizeUnless(DeoptimizeReason reason, Node* condition,
Node* frame_state) {
- return current_control_ = current_effect_ =
- graph()->NewNode(common()->DeoptimizeUnless(reason), condition,
- frame_state, current_effect_, current_control_);
+ return DeoptimizeUnless(DeoptimizeKind::kEager, reason, condition,
+ frame_state);
}
void GraphAssembler::Branch(Node* condition,
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 61f8f5b61d..057e78184e 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -275,6 +275,8 @@ class GraphAssembler {
Node* DeoptimizeIf(DeoptimizeReason reason, Node* condition,
Node* frame_state);
+ Node* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+ Node* condition, Node* frame_state);
Node* DeoptimizeUnless(DeoptimizeReason reason, Node* condition,
Node* frame_state);
template <typename... Args>
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 1043c91e2a..2cd10a744d 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -22,6 +22,7 @@
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/interpreter/bytecodes.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
namespace v8 {
@@ -34,9 +35,15 @@ std::unique_ptr<char[]> GetVisualizerLogFileName(CompilationInfo* info,
EmbeddedVector<char, 256> filename(0);
std::unique_ptr<char[]> debug_name = info->GetDebugName();
if (strlen(debug_name.get()) > 0) {
- SNPrintF(filename, "turbo-%s", debug_name.get());
+ if (info->has_shared_info()) {
+ int attempt = info->shared_info()->opt_count();
+ SNPrintF(filename, "turbo-%s-%i", debug_name.get(), attempt);
+ } else {
+ SNPrintF(filename, "turbo-%s", debug_name.get());
+ }
} else if (info->has_shared_info()) {
- SNPrintF(filename, "turbo-%p", static_cast<void*>(info));
+ int attempt = info->shared_info()->opt_count();
+ SNPrintF(filename, "turbo-%p-%i", static_cast<void*>(info), attempt);
} else {
SNPrintF(filename, "turbo-none-%s", phase);
}
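Folding the optimization-attempt counter (opt_count) into the file name keeps repeated recompilations of the same function from overwriting each other's trace files. For a function named foo, successive attempts would now produce base names like:

    turbo-foo-0   // first optimization attempt
    turbo-foo-1   // after a deopt/reopt cycle

(any file extension is handled outside this hunk).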
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 1e861c7b15..6fb7cfa644 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -104,6 +104,59 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9};
return NewNode(op, arraysize(nodes), nodes);
}
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+ Node* n11) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+ Node* n11, Node* n12) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+ Node* n11, Node* n12, Node* n13) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+ Node* n11, Node* n12, Node* n13, Node* n14) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7,
+ n8, n9, n10, n11, n12, n13, n14};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+ Node* n11, Node* n12, Node* n13, Node* n14, Node* n15) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8,
+ n9, n10, n11, n12, n13, n14, n15};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+ Node* n11, Node* n12, Node* n13, Node* n14, Node* n15,
+ Node* n16) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8,
+ n9, n10, n11, n12, n13, n14, n15, n16};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+ Node* n11, Node* n12, Node* n13, Node* n14, Node* n15,
+ Node* n16, Node* n17) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9,
+ n10, n11, n12, n13, n14, n15, n16, n17};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
// Clone the {node}, and assign a new node id to the copy.
Node* CloneNode(const Node* node);
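All of these fixed-arity NewNode overloads funnel into the same count-plus-array core, so they could in principle be collapsed into a single variadic template; a sketch, assuming no caller depends on the explicit arities:

    template <typename... Ns>
    Node* NewNode(const Operator* op, Node* n1, Ns*... rest) {
      Node* nodes[] = {n1, rest...};
      return NewNode(op, arraysize(nodes), nodes);
    }

Spelling the arities out by hand, as the patch does, keeps the header free of template machinery and matches the pre-existing style of the file.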
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index e004896ea2..369699067e 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -183,9 +183,9 @@ bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-class OutOfLineLoadInteger final : public OutOfLineCode {
+class OutOfLineLoadZero final : public OutOfLineCode {
public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+ OutOfLineLoadZero(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final { __ xor_(result_, result_); }
@@ -283,66 +283,423 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} // namespace
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN, \
+ SingleOrDouble) \
+ do { \
+ auto result = i.OutputDoubleRegister(); \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
+ } else { \
+ auto index2 = i.InputInt32(0); \
+ auto length = i.InputInt32(1); \
+ auto index1 = i.InputRegister(2); \
+ RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
+ RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode(); \
+ DCHECK_LE(index2, length); \
+ __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
+ rmode_length)); \
+ class OutOfLineLoadFloat final : public OutOfLineCode { \
+ public: \
+ OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
+ Register buffer, Register index1, int32_t index2, \
+ int32_t length, RelocInfo::Mode rmode_length, \
+ RelocInfo::Mode rmode_buffer) \
+ : OutOfLineCode(gen), \
+ result_(result), \
+ buffer_reg_(buffer), \
+ buffer_int_(0), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length), \
+ rmode_length_(rmode_length), \
+ rmode_buffer_(rmode_buffer) {} \
+ \
+ OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
+ int32_t buffer, Register index1, int32_t index2, \
+ int32_t length, RelocInfo::Mode rmode_length, \
+ RelocInfo::Mode rmode_buffer) \
+ : OutOfLineCode(gen), \
+ result_(result), \
+ buffer_reg_({-1}), \
+ buffer_int_(buffer), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length), \
+ rmode_length_(rmode_length), \
+ rmode_buffer_(rmode_buffer) {} \
+ \
+ void Generate() final { \
+ Label oob; \
+ __ push(index1_); \
+ __ lea(index1_, Operand(index1_, index2_)); \
+ __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
+ rmode_length_)); \
+ __ j(above_equal, &oob, Label::kNear); \
+ if (buffer_reg_.is_valid()) { \
+ __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0)); \
+ } else { \
+ __ asm_instr(result_, \
+ Operand(index1_, buffer_int_, rmode_buffer_)); \
+ } \
+ __ pop(index1_); \
+ __ jmp(exit()); \
+ __ bind(&oob); \
+ __ pop(index1_); \
+ __ xorp##SingleOrDouble(result_, result_); \
+ __ divs##SingleOrDouble(result_, result_); \
+ } \
+ \
+ private: \
+ XMMRegister const result_; \
+ Register const buffer_reg_; \
+ int32_t const buffer_int_; \
+ Register const index1_; \
+ int32_t const index2_; \
+ int32_t const length_; \
+ RelocInfo::Mode rmode_length_; \
+ RelocInfo::Mode rmode_buffer_; \
+ }; \
+ if (instr->InputAt(3)->IsRegister()) { \
+ auto buffer = i.InputRegister(3); \
+ OutOfLineCode* ool = new (zone()) \
+ OutOfLineLoadFloat(this, result, buffer, index1, index2, length, \
+ rmode_length, rmode_buffer); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
+ __ bind(ool->exit()); \
+ } else { \
+ auto buffer = i.InputInt32(3); \
+ OutOfLineCode* ool = new (zone()) \
+ OutOfLineLoadFloat(this, result, buffer, index1, index2, length, \
+ rmode_length, rmode_buffer); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer)); \
+ __ bind(ool->exit()); \
+ } \
+ } \
} while (false)
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadZero(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
+ } else { \
+ auto index2 = i.InputInt32(0); \
+ auto length = i.InputInt32(1); \
+ auto index1 = i.InputRegister(2); \
+ RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
+ RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode(); \
+ DCHECK_LE(index2, length); \
+ __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
+ rmode_length)); \
+ class OutOfLineLoadInteger final : public OutOfLineCode { \
+ public: \
+ OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
+ Register buffer, Register index1, int32_t index2, \
+ int32_t length, RelocInfo::Mode rmode_length, \
+ RelocInfo::Mode rmode_buffer) \
+ : OutOfLineCode(gen), \
+ result_(result), \
+ buffer_reg_(buffer), \
+ buffer_int_(0), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length), \
+ rmode_length_(rmode_length), \
+ rmode_buffer_(rmode_buffer) {} \
+ \
+ OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
+ int32_t buffer, Register index1, int32_t index2, \
+ int32_t length, RelocInfo::Mode rmode_length, \
+ RelocInfo::Mode rmode_buffer) \
+ : OutOfLineCode(gen), \
+ result_(result), \
+ buffer_reg_({-1}), \
+ buffer_int_(buffer), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length), \
+ rmode_length_(rmode_length), \
+ rmode_buffer_(rmode_buffer) {} \
+ \
+ void Generate() final { \
+ Label oob; \
+ bool need_cache = !result_.is(index1_); \
+ if (need_cache) __ push(index1_); \
+ __ lea(index1_, Operand(index1_, index2_)); \
+ __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
+ rmode_length_)); \
+ __ j(above_equal, &oob, Label::kNear); \
+ if (buffer_reg_.is_valid()) { \
+ __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0)); \
+ } else { \
+ __ asm_instr(result_, \
+ Operand(index1_, buffer_int_, rmode_buffer_)); \
+ } \
+ if (need_cache) __ pop(index1_); \
+ __ jmp(exit()); \
+ __ bind(&oob); \
+ if (need_cache) __ pop(index1_); \
+ __ xor_(result_, result_); \
+ } \
+ \
+ private: \
+ Register const result_; \
+ Register const buffer_reg_; \
+ int32_t const buffer_int_; \
+ Register const index1_; \
+ int32_t const index2_; \
+ int32_t const length_; \
+ RelocInfo::Mode rmode_length_; \
+ RelocInfo::Mode rmode_buffer_; \
+ }; \
+ if (instr->InputAt(3)->IsRegister()) { \
+ auto buffer = i.InputRegister(3); \
+ OutOfLineCode* ool = new (zone()) \
+ OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
+ rmode_length, rmode_buffer); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
+ __ bind(ool->exit()); \
+ } else { \
+ auto buffer = i.InputInt32(3); \
+ OutOfLineCode* ool = new (zone()) \
+ OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
+ rmode_length, rmode_buffer); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer)); \
+ __ bind(ool->exit()); \
+ } \
+ } \
+ } while (false)
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
+ do { \
+ auto value = i.InputDoubleRegister(2); \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ __ j(above_equal, &done, Label::kNear); \
+ __ asm_instr(i.MemoryOperand(3), value); \
+ __ bind(&done); \
+ } else { \
+ auto index2 = i.InputInt32(0); \
+ auto length = i.InputInt32(1); \
+ auto index1 = i.InputRegister(3); \
+ RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
+ RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode(); \
+ DCHECK_LE(index2, length); \
+ __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
+ rmode_length)); \
+ class OutOfLineStoreFloat final : public OutOfLineCode { \
+ public: \
+ OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
+ Register index1, int32_t index2, int32_t length, \
+ XMMRegister value, RelocInfo::Mode rmode_length, \
+ RelocInfo::Mode rmode_buffer) \
+ : OutOfLineCode(gen), \
+ buffer_reg_(buffer), \
+ buffer_int_(0), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length), \
+ value_(value), \
+ rmode_length_(rmode_length), \
+ rmode_buffer_(rmode_buffer) {} \
+ \
+ OutOfLineStoreFloat(CodeGenerator* gen, int32_t buffer, \
+ Register index1, int32_t index2, int32_t length, \
+ XMMRegister value, RelocInfo::Mode rmode_length, \
+ RelocInfo::Mode rmode_buffer) \
+ : OutOfLineCode(gen), \
+ buffer_reg_({-1}), \
+ buffer_int_(buffer), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length), \
+ value_(value), \
+ rmode_length_(rmode_length), \
+ rmode_buffer_(rmode_buffer) {} \
+ \
+ void Generate() final { \
+ Label oob; \
+ __ push(index1_); \
+ __ lea(index1_, Operand(index1_, index2_)); \
+ __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
+ rmode_length_)); \
+ __ j(above_equal, &oob, Label::kNear); \
+ if (buffer_reg_.is_valid()) { \
+ __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_); \
+ } else { \
+ __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_), \
+ value_); \
+ } \
+ __ bind(&oob); \
+ __ pop(index1_); \
+ } \
+ \
+ private: \
+ Register const buffer_reg_; \
+ int32_t const buffer_int_; \
+ Register const index1_; \
+ int32_t const index2_; \
+ int32_t const length_; \
+ XMMRegister const value_; \
+ RelocInfo::Mode rmode_length_; \
+ RelocInfo::Mode rmode_buffer_; \
+ }; \
+ if (instr->InputAt(4)->IsRegister()) { \
+ auto buffer = i.InputRegister(4); \
+ OutOfLineCode* ool = new (zone()) \
+ OutOfLineStoreFloat(this, buffer, index1, index2, length, value, \
+ rmode_length, rmode_buffer); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
+ __ bind(ool->exit()); \
+ } else { \
+ auto buffer = i.InputInt32(4); \
+ OutOfLineCode* ool = new (zone()) \
+ OutOfLineStoreFloat(this, buffer, index1, index2, length, value, \
+ rmode_length, rmode_buffer); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value); \
+ __ bind(ool->exit()); \
+ } \
+ } \
} while (false)
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
- __ bind(&done); \
+#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
+ do { \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ __ j(above_equal, &done, Label::kNear); \
+ __ asm_instr(i.MemoryOperand(3), value); \
+ __ bind(&done); \
+ } else { \
+ auto index2 = i.InputInt32(0); \
+ auto length = i.InputInt32(1); \
+ auto index1 = i.InputRegister(3); \
+ RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
+ RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode(); \
+ DCHECK_LE(index2, length); \
+ __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
+ rmode_length)); \
+ class OutOfLineStoreInteger final : public OutOfLineCode { \
+ public: \
+ OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
+ Register index1, int32_t index2, int32_t length, \
+ Value value, RelocInfo::Mode rmode_length, \
+ RelocInfo::Mode rmode_buffer) \
+ : OutOfLineCode(gen), \
+ buffer_reg_(buffer), \
+ buffer_int_(0), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length), \
+ value_(value), \
+ rmode_length_(rmode_length), \
+ rmode_buffer_(rmode_buffer) {} \
+ \
+ OutOfLineStoreInteger(CodeGenerator* gen, int32_t buffer, \
+ Register index1, int32_t index2, int32_t length, \
+ Value value, RelocInfo::Mode rmode_length, \
+ RelocInfo::Mode rmode_buffer) \
+ : OutOfLineCode(gen), \
+ buffer_reg_({-1}), \
+ buffer_int_(buffer), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length), \
+ value_(value), \
+ rmode_length_(rmode_length), \
+ rmode_buffer_(rmode_buffer) {} \
+ \
+ void Generate() final { \
+ Label oob; \
+ __ push(index1_); \
+ __ lea(index1_, Operand(index1_, index2_)); \
+ __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
+ rmode_length_)); \
+ __ j(above_equal, &oob, Label::kNear); \
+ if (buffer_reg_.is_valid()) { \
+ __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_); \
+ } else { \
+ __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_), \
+ value_); \
+ } \
+ __ bind(&oob); \
+ __ pop(index1_); \
+ } \
+ \
+ private: \
+ Register const buffer_reg_; \
+ int32_t const buffer_int_; \
+ Register const index1_; \
+ int32_t const index2_; \
+ int32_t const length_; \
+ Value const value_; \
+ RelocInfo::Mode rmode_length_; \
+ RelocInfo::Mode rmode_buffer_; \
+ }; \
+ if (instr->InputAt(4)->IsRegister()) { \
+ auto buffer = i.InputRegister(4); \
+ OutOfLineCode* ool = new (zone()) \
+ OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
+ rmode_length, rmode_buffer); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
+ __ bind(ool->exit()); \
+ } else { \
+ auto buffer = i.InputInt32(4); \
+ OutOfLineCode* ool = new (zone()) \
+ OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
+ rmode_length, rmode_buffer); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value); \
+ __ bind(ool->exit()); \
+ } \
+ } \
} while (false)
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- if (instr->InputAt(2)->IsRegister()) { \
- __ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
- } else { \
- __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ if (instr->InputAt(2)->IsRegister()) { \
+ Register value = i.InputRegister(2); \
+ ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
+ } else { \
+ Immediate value = i.InputImmediate(2); \
+ ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
+ } \
} while (false)
#define ASSEMBLE_COMPARE(asm_instr) \
@@ -429,7 +786,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
// Check if current frame is an arguments adaptor frame.
__ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &done, Label::kNear);
__ push(scratch1);
@@ -636,10 +993,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- Deoptimizer::BailoutType bailout_type =
- Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result = AssembleDeoptimizerCall(
- deopt_state_id, bailout_type, current_source_position_);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1507,7 +1862,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kIA32Push:
- if (instr->InputAt(0)->IsFPRegister()) {
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ push(operand);
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+ } else if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kFloatSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
@@ -1562,10 +1922,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN, s);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN, d);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
@@ -1688,8 +2048,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
void Generate() final {
IA32OperandConverter i(gen_, instr_);
- Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
- i.InputInt32(instr_->InputCount() - 1));
+ Builtins::Name trap_id =
+ static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
bool old_has_frame = __ has_frame();
if (frame_elided_) {
__ set_has_frame(true);
@@ -1697,30 +2057,32 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
}
GenerateCallToTrap(trap_id);
if (frame_elided_) {
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
__ set_has_frame(old_has_frame);
}
- if (FLAG_debug_code) {
- __ ud2();
- }
}
private:
- void GenerateCallToTrap(Runtime::FunctionId trap_id) {
- if (trap_id == Runtime::kNumFunctions) {
+ void GenerateCallToTrap(Builtins::Name trap_id) {
+ if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ PrepareCallCFunction(0, esi);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
} else {
- __ Move(esi, isolate()->native_context());
gen_->AssembleSourcePosition(instr_);
- __ CallRuntime(trap_id);
+ __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ RelocInfo::CODE_TARGET);
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ ud2();
+ }
}
}
@@ -1807,13 +2169,16 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type,
- SourcePosition pos) {
+ int deoptimization_id, SourcePosition pos) {
+ DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ Deoptimizer::BailoutType bailout_type =
+ deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+ : Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 5548f55a1e..a5f72c70b2 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -234,6 +234,9 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -324,6 +327,9 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -391,10 +397,37 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
+ if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+ Int32BinopMatcher moffset(offset);
+ InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
+ ? g.UseImmediate(buffer)
+ : g.UseRegister(buffer);
+ Int32Matcher mlength(length);
+ if (mlength.HasValue() && moffset.right().HasValue() &&
+ moffset.right().Value() >= 0 &&
+ mlength.Value() >= moffset.right().Value()) {
+ Emit(opcode, g.DefineAsRegister(node),
+ g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
+ g.UseRegister(moffset.left().node()), buffer_operand);
+ return;
+ }
+ IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
+ if (mmlength.HasValue() && moffset.right().HasValue() &&
+ moffset.right().Value() >= 0 &&
+ mmlength.Value() >= moffset.right().Value()) {
+ Emit(opcode, g.DefineAsRegister(node),
+ g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
+ g.UseRegister(moffset.left().node()), buffer_operand);
+ return;
+ }
+ }
InstructionOperand offset_operand = g.UseRegister(offset);
InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
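If I read the matcher right, the new block folds an `Int32Add(index, k)` offset into the checked load when `k` is a non-negative constant and the (possibly relocatable) constant length satisfies `length >= k`: the constant part becomes an immediate and only the variable index stays in a register. A standalone check of the arithmetic precondition this kind of folding relies on (hypothetical names):

    #include <cassert>
    #include <cstdint>

    // Precondition the matcher enforces: k >= 0 and length >= k. Under it,
    // index < length - k is exactly index + k < length with no overflow.
    bool InBounds(uint32_t index, uint32_t k, uint32_t length) {
      assert(k <= length);
      return index < length - k;
    }

    int main() {
      assert(InBounds(0, 4, 16));    // effective offset 4 is in bounds
      assert(!InBounds(12, 4, 16));  // effective offset 16 is not
    }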
@@ -440,6 +473,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -450,6 +486,30 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
rep == MachineRepresentation::kBit)
? g.UseByteRegister(value)
: g.UseRegister(value));
+ if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+ Int32BinopMatcher moffset(offset);
+ InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
+ ? g.UseImmediate(buffer)
+ : g.UseRegister(buffer);
+ Int32Matcher mlength(length);
+ if (mlength.HasValue() && moffset.right().HasValue() &&
+ moffset.right().Value() >= 0 &&
+ mlength.Value() >= moffset.right().Value()) {
+ Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
+ g.UseImmediate(length), value_operand,
+ g.UseRegister(moffset.left().node()), buffer_operand);
+ return;
+ }
+ IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
+ if (mmlength.HasValue() && moffset.right().HasValue() &&
+ moffset.right().Value() >= 0 &&
+ mmlength.Value() >= moffset.right().Value()) {
+ Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
+ g.UseImmediate(length), value_operand,
+ g.UseRegister(moffset.left().node()), buffer_operand);
+ return;
+ }
+ }
InstructionOperand offset_operand = g.UseRegister(offset);
InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
@@ -520,7 +580,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -771,18 +831,83 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kIA32Ror);
}
+#define RO_OP_LIST(V) \
+ V(Word32Clz, kIA32Lzcnt) \
+ V(Word32Ctz, kIA32Tzcnt) \
+ V(Word32Popcnt, kIA32Popcnt) \
+ V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
+ V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
+ V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
+ V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
+ V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
+ V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
+ V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
+ V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
+ V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \
+ V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
+ V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
+ V(BitcastFloat32ToInt32, kIA32BitcastFI) \
+ V(BitcastInt32ToFloat32, kIA32BitcastIF) \
+ V(Float32Sqrt, kSSEFloat32Sqrt) \
+ V(Float64Sqrt, kSSEFloat64Sqrt) \
+ V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
+ V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
+
+#define RR_OP_LIST(V) \
+ V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
+ V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \
+ V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown)) \
+ V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp)) \
+ V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp)) \
+ V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
+ V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
+ V(Float32RoundTiesEven, \
+ kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
+ V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
+
+#define RRO_FLOAT_OP_LIST(V) \
+ V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \
+ V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \
+ V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \
+ V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \
+ V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
+ V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
+ V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
+ V(Float64Div, kAVXFloat64Div, kSSEFloat64Div)
+
+#define FLOAT_UNOP_LIST(V) \
+ V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
+ V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
+ V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \
+ V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg)
+
+#define RO_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRO(this, node, opcode); \
+ }
+RO_OP_LIST(RO_VISITOR)
+#undef RO_VISITOR
-void InstructionSelector::VisitWord32Clz(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kIA32Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
+#define RR_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, node, opcode); \
+ }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
-void InstructionSelector::VisitWord32Ctz(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kIA32Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
+#define RRO_FLOAT_VISITOR(Name, avx, sse) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRROFloat(this, node, avx, sse); \
+ }
+RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
+#undef RRO_FLOAT_VISITOR
+#define FLOAT_UNOP_VISITOR(Name, avx, sse) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitFloatUnop(this, node, node->InputAt(0), avx, sse); \
+ }
+FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
+#undef FLOAT_UNOP_VISITOR
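The repetitive one-line visitors deleted below are collapsed into the instruction lists above, each expanded once per visitor-generating macro, so adding an opcode becomes a one-line list change. A standalone illustration of the list/generator macro pattern, with made-up opcodes:

    #include <cstdio>

    // One list, many expansions: each V(...) row is stamped out once per
    // generator macro applied to the list.
    #define UNOP_LIST(V) \
      V(Neg, "neg")      \
      V(Not, "not")

    #define DECLARE_VISITOR(Name, mnemonic) \
      void Visit##Name(int operand) { std::printf("%s %d\n", mnemonic, operand); }
    UNOP_LIST(DECLARE_VISITOR)
    #undef DECLARE_VISITOR

    int main() {
      VisitNeg(1);  // generated from the Neg row
      VisitNot(2);  // generated from the Not row
    }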
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
@@ -790,12 +915,6 @@ void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32Popcnt(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
@@ -890,16 +1009,6 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
}
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- VisitRO(this, node, kSSEFloat32ToFloat64);
-}
-
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
- VisitRO(this, node, kSSEInt32ToFloat32);
-}
-
-
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
@@ -907,103 +1016,6 @@ void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
arraysize(temps), temps);
}
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- VisitRO(this, node, kSSEInt32ToFloat64);
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- VisitRO(this, node, kSSEUint32ToFloat64);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
- VisitRO(this, node, kSSEFloat32ToInt32);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
- VisitRO(this, node, kSSEFloat32ToUint32);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- VisitRO(this, node, kSSEFloat64ToInt32);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- VisitRO(this, node, kSSEFloat64ToUint32);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
- VisitRO(this, node, kSSEFloat64ToUint32);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- VisitRO(this, node, kSSEFloat64ToFloat32);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
- VisitRR(this, node, kArchTruncateDoubleToI);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
- VisitRO(this, node, kSSEFloat64ToInt32);
-}
-
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kIA32BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kIA32BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat32Add(Node* node) {
- VisitRROFloat(this, node, kAVXFloat32Add, kSSEFloat32Add);
-}
-
-
-void InstructionSelector::VisitFloat64Add(Node* node) {
- VisitRROFloat(this, node, kAVXFloat64Add, kSSEFloat64Add);
-}
-
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
- VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
-}
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
- VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
-}
-
-void InstructionSelector::VisitFloat32Mul(Node* node) {
- VisitRROFloat(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
- VisitRROFloat(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
- VisitRROFloat(this, node, kAVXFloat32Div, kSSEFloat32Div);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
- VisitRROFloat(this, node, kAVXFloat64Div, kSSEFloat64Div);
-}
-
-
void InstructionSelector::VisitFloat64Mod(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister(eax)};
@@ -1044,80 +1056,10 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
arraysize(temps), temps);
}
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
- IA32OperandGenerator g(this);
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
- IA32OperandGenerator g(this);
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- VisitRO(this, node, kSSEFloat32Sqrt);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- VisitRO(this, node, kSSEFloat64Sqrt);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
- VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
- VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
- VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
- VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
- VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
-}
-
-
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
UNREACHABLE();
}
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
- VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
- VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
-}
-
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
InstructionCode opcode) {
IA32OperandGenerator g(this);
@@ -1159,22 +1101,35 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Push any stack arguments.
+ int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
+ Node* input_node = input.node();
if (input.node() == nullptr) continue;
- InstructionOperand value =
- g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
- : IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input.node()))
- ? g.UseRegister(input.node())
- : g.Use(input.node());
- if (input.type() == MachineType::Float32()) {
- Emit(kIA32PushFloat32, g.NoOutput(), value);
- } else if (input.type() == MachineType::Float64()) {
- Emit(kIA32PushFloat64, g.NoOutput(), value);
+ if (g.CanBeMemoryOperand(kIA32Push, node, input_node, effect_level)) {
+ InstructionOperand outputs[1];
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ InstructionCode opcode = kIA32Push;
+ AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+ input_node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 0, outputs, input_count, inputs);
} else {
- Emit(kIA32Push, g.NoOutput(), value);
+ InstructionOperand value =
+ g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : IsSupported(ATOM) ||
+ sequence()->IsFP(GetVirtualRegister(input.node()))
+ ? g.UseRegister(input.node())
+ : g.Use(input.node());
+ if (input.type() == MachineType::Float32()) {
+ Emit(kIA32PushFloat32, g.NoOutput(), value);
+ } else if (input.type() == MachineType::Float64()) {
+ Emit(kIA32PushFloat64, g.NoOutput(), value);
+ } else {
+ Emit(kIA32Push, g.NoOutput(), value);
+ }
}
}
}
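The rewritten argument-pushing loop first asks whether the input can be used as a memory operand at the current effect level; if so it emits kIA32Push with an addressing mode, folding the load into the push and saving a register plus a mov on register-starved IA-32. A minimal sketch of the selection policy, with stand-in predicates for the real selector queries (CanBeMemoryOperand, CanBeImmediate, and the FP/ATOM checks):

    #include <cstdio>

    enum class PushKind { kMemoryOperand, kImmediate, kRegister, kSlot };

    // Stand-in for the decision order above: memory operand wins, then
    // immediate, then register vs. arbitrary slot.
    PushKind ClassifyPushInput(bool can_be_memory, bool is_immediate,
                               bool prefer_register) {
      if (can_be_memory) return PushKind::kMemoryOperand;  // push [base+disp]
      if (is_immediate) return PushKind::kImmediate;       // push imm32
      return prefer_register ? PushKind::kRegister : PushKind::kSlot;
    }

    int main() {
      std::printf("%d\n", static_cast<int>(ClassifyPushInput(true, false, false)));
      std::printf("%d\n", static_cast<int>(ClassifyPushInput(false, true, false)));
    }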
@@ -1207,7 +1162,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
@@ -1228,8 +1183,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
} else {
@@ -1414,8 +1369,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
+ cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1527,14 +1482,16 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1683,19 +1640,6 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
IA32OperandGenerator g(this);
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index 00b2733b3b..360069c5d5 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -206,11 +206,11 @@ typedef int32_t InstructionCode;
// for code generation. We encode the instruction, addressing mode, and flags
// continuation into a single InstructionCode which is stored as part of
// the instruction.
-typedef BitField<ArchOpcode, 0, 8> ArchOpcodeField;
-typedef BitField<AddressingMode, 8, 5> AddressingModeField;
-typedef BitField<FlagsMode, 13, 3> FlagsModeField;
-typedef BitField<FlagsCondition, 16, 5> FlagsConditionField;
-typedef BitField<int, 21, 11> MiscField;
+typedef BitField<ArchOpcode, 0, 9> ArchOpcodeField;
+typedef BitField<AddressingMode, 9, 5> AddressingModeField;
+typedef BitField<FlagsMode, 14, 3> FlagsModeField;
+typedef BitField<FlagsCondition, 17, 5> FlagsConditionField;
+typedef BitField<int, 22, 10> MiscField;
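The opcode space evidently grew past 256 entries (the SIMD opcodes elsewhere in this change account for much of that), so ArchOpcodeField widens from 8 to 9 bits and MiscField shrinks from 11 to 10; the five fields still pack exactly into a 32-bit InstructionCode. A standalone check of the new layout, mirroring the BitField positions with plain shifts:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t Encode(uint32_t arch, uint32_t mode, uint32_t flags,
                              uint32_t cond, uint32_t misc) {
      return arch | (mode << 9) | (flags << 14) | (cond << 17) | (misc << 22);
    }

    int main() {
      static_assert(9 + 5 + 3 + 5 + 10 == 32, "fields must fill the code");
      // Saturating every field fills all 32 bits with no gaps or overlaps.
      assert(Encode(0x1FF, 0x1F, 0x7, 0x1F, 0x3FF) == 0xFFFFFFFFu);
    }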
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 1b1fa12e6e..ecda453351 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -182,6 +182,21 @@ class OperandGenerator {
sequence()->NextVirtualRegister());
}
+ int AllocateVirtualRegister() { return sequence()->NextVirtualRegister(); }
+
+ InstructionOperand DefineSameAsFirstForVreg(int vreg) {
+ return UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT, vreg);
+ }
+
+  InstructionOperand DefineAsRegisterForVreg(int vreg) {
+ return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg);
+ }
+
+ InstructionOperand UseRegisterForVreg(int vreg) {
+ return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START, vreg);
+ }
+
InstructionOperand TempDoubleRegister() {
UnallocatedOperand op = UnallocatedOperand(
UnallocatedOperand::MUST_HAVE_REGISTER,
@@ -335,9 +350,10 @@ class FlagsContinuation final {
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimize(FlagsCondition condition,
+ DeoptimizeKind kind,
DeoptimizeReason reason,
Node* frame_state) {
- return FlagsContinuation(condition, reason, frame_state);
+ return FlagsContinuation(condition, kind, reason, frame_state);
}
// Creates a new flags continuation for a boolean value.
@@ -360,6 +376,10 @@ class FlagsContinuation final {
DCHECK(!IsNone());
return condition_;
}
+ DeoptimizeKind kind() const {
+ DCHECK(IsDeoptimize());
+ return kind_;
+ }
DeoptimizeReason reason() const {
DCHECK(IsDeoptimize());
return reason_;
@@ -433,10 +453,11 @@ class FlagsContinuation final {
}
private:
- FlagsContinuation(FlagsCondition condition, DeoptimizeReason reason,
- Node* frame_state)
+ FlagsContinuation(FlagsCondition condition, DeoptimizeKind kind,
+ DeoptimizeReason reason, Node* frame_state)
: mode_(kFlags_deoptimize),
condition_(condition),
+ kind_(kind),
reason_(reason),
frame_state_or_result_(frame_state) {
DCHECK_NOT_NULL(frame_state);
@@ -459,7 +480,8 @@ class FlagsContinuation final {
FlagsMode const mode_;
FlagsCondition condition_;
- DeoptimizeReason reason_; // Only value if mode_ == kFlags_deoptimize
+ DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize
+ DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize
Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize
// or mode_ == kFlags_set.
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index ae96b9106f..57b6028a1b 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -14,6 +14,7 @@
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
#include "src/deoptimizer.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -452,6 +453,7 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
return g->UseImmediate(input);
}
+ case IrOpcode::kArgumentsObjectState:
case IrOpcode::kObjectState:
case IrOpcode::kTypedObjectState:
UNREACHABLE();
@@ -507,6 +509,10 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
}
switch (input->opcode()) {
+ case IrOpcode::kArgumentsObjectState: {
+ values->PushArguments();
+ return 0;
+ }
case IrOpcode::kObjectState: {
UNREACHABLE();
return 0;
@@ -760,7 +766,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
int const state_id = sequence()->AddDeoptimizationEntry(
- buffer->frame_state_descriptor, DeoptimizeReason::kNoReason);
+ buffer->frame_state_descriptor, DeoptimizeKind::kEager,
+ DeoptimizeReason::kNoReason);
buffer->instruction_args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
@@ -841,6 +848,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
int effect_level = 0;
for (Node* const node : *block) {
+ SetEffectLevel(node, effect_level);
if (node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kUnalignedStore ||
node->opcode() == IrOpcode::kCheckedStore ||
@@ -849,7 +857,6 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
node->opcode() == IrOpcode::kProtectedStore) {
++effect_level;
}
- SetEffectLevel(node, effect_level);
}
// We visit the control first, then the nodes in the block, so the block's
@@ -907,6 +914,8 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
if (block->SuccessorCount() > 1) {
for (BasicBlock* const successor : block->successors()) {
for (Node* const node : *successor) {
+ // If this CHECK fails, you might have specified merged variables
+ // for a label with only one predecessor.
CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
}
}
@@ -1467,9 +1476,9 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kFloat32x4Sub:
return MarkAsSimd128(node), VisitFloat32x4Sub(node);
case IrOpcode::kFloat32x4Equal:
- return MarkAsSimd128(node), VisitFloat32x4Equal(node);
+ return MarkAsSimd1x4(node), VisitFloat32x4Equal(node);
case IrOpcode::kFloat32x4NotEqual:
- return MarkAsSimd128(node), VisitFloat32x4NotEqual(node);
+ return MarkAsSimd1x4(node), VisitFloat32x4NotEqual(node);
case IrOpcode::kCreateInt32x4:
return MarkAsSimd128(node), VisitCreateInt32x4(node);
case IrOpcode::kInt32x4ExtractLane:
@@ -1480,16 +1489,150 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitInt32x4FromFloat32x4(node);
case IrOpcode::kUint32x4FromFloat32x4:
return MarkAsSimd128(node), VisitUint32x4FromFloat32x4(node);
+ case IrOpcode::kInt32x4Neg:
+ return MarkAsSimd128(node), VisitInt32x4Neg(node);
+ case IrOpcode::kInt32x4ShiftLeftByScalar:
+ return MarkAsSimd128(node), VisitInt32x4ShiftLeftByScalar(node);
+ case IrOpcode::kInt32x4ShiftRightByScalar:
+ return MarkAsSimd128(node), VisitInt32x4ShiftRightByScalar(node);
case IrOpcode::kInt32x4Add:
return MarkAsSimd128(node), VisitInt32x4Add(node);
case IrOpcode::kInt32x4Sub:
return MarkAsSimd128(node), VisitInt32x4Sub(node);
+ case IrOpcode::kInt32x4Mul:
+ return MarkAsSimd128(node), VisitInt32x4Mul(node);
+ case IrOpcode::kInt32x4Min:
+ return MarkAsSimd128(node), VisitInt32x4Min(node);
+ case IrOpcode::kInt32x4Max:
+ return MarkAsSimd128(node), VisitInt32x4Max(node);
case IrOpcode::kInt32x4Equal:
- return MarkAsSimd128(node), VisitInt32x4Equal(node);
+ return MarkAsSimd1x4(node), VisitInt32x4Equal(node);
case IrOpcode::kInt32x4NotEqual:
- return MarkAsSimd128(node), VisitInt32x4NotEqual(node);
+ return MarkAsSimd1x4(node), VisitInt32x4NotEqual(node);
+ case IrOpcode::kInt32x4GreaterThan:
+ return MarkAsSimd1x4(node), VisitInt32x4GreaterThan(node);
+ case IrOpcode::kInt32x4GreaterThanOrEqual:
+ return MarkAsSimd1x4(node), VisitInt32x4GreaterThanOrEqual(node);
+ case IrOpcode::kUint32x4ShiftRightByScalar:
+ return MarkAsSimd128(node), VisitUint32x4ShiftRightByScalar(node);
+ case IrOpcode::kUint32x4Min:
+ return MarkAsSimd128(node), VisitUint32x4Min(node);
+ case IrOpcode::kUint32x4Max:
+ return MarkAsSimd128(node), VisitUint32x4Max(node);
+ case IrOpcode::kUint32x4GreaterThan:
+ return MarkAsSimd1x4(node), VisitUint32x4GreaterThan(node);
+ case IrOpcode::kUint32x4GreaterThanOrEqual:
+ return MarkAsSimd1x4(node), VisitUint32x4GreaterThanOrEqual(node);
+ case IrOpcode::kCreateInt16x8:
+ return MarkAsSimd128(node), VisitCreateInt16x8(node);
+ case IrOpcode::kInt16x8ExtractLane:
+ return MarkAsWord32(node), VisitInt16x8ExtractLane(node);
+ case IrOpcode::kInt16x8ReplaceLane:
+ return MarkAsSimd128(node), VisitInt16x8ReplaceLane(node);
+ case IrOpcode::kInt16x8Neg:
+ return MarkAsSimd128(node), VisitInt16x8Neg(node);
+ case IrOpcode::kInt16x8ShiftLeftByScalar:
+ return MarkAsSimd128(node), VisitInt16x8ShiftLeftByScalar(node);
+ case IrOpcode::kInt16x8ShiftRightByScalar:
+ return MarkAsSimd128(node), VisitInt16x8ShiftRightByScalar(node);
+ case IrOpcode::kInt16x8Add:
+ return MarkAsSimd128(node), VisitInt16x8Add(node);
+ case IrOpcode::kInt16x8AddSaturate:
+ return MarkAsSimd128(node), VisitInt16x8AddSaturate(node);
+ case IrOpcode::kInt16x8Sub:
+ return MarkAsSimd128(node), VisitInt16x8Sub(node);
+ case IrOpcode::kInt16x8SubSaturate:
+ return MarkAsSimd128(node), VisitInt16x8SubSaturate(node);
+ case IrOpcode::kInt16x8Mul:
+ return MarkAsSimd128(node), VisitInt16x8Mul(node);
+ case IrOpcode::kInt16x8Min:
+ return MarkAsSimd128(node), VisitInt16x8Min(node);
+ case IrOpcode::kInt16x8Max:
+ return MarkAsSimd128(node), VisitInt16x8Max(node);
+ case IrOpcode::kInt16x8Equal:
+ return MarkAsSimd1x8(node), VisitInt16x8Equal(node);
+ case IrOpcode::kInt16x8NotEqual:
+ return MarkAsSimd1x8(node), VisitInt16x8NotEqual(node);
+ case IrOpcode::kInt16x8GreaterThan:
+ return MarkAsSimd1x8(node), VisitInt16x8GreaterThan(node);
+ case IrOpcode::kInt16x8GreaterThanOrEqual:
+ return MarkAsSimd1x8(node), VisitInt16x8GreaterThanOrEqual(node);
+ case IrOpcode::kUint16x8ShiftRightByScalar:
+ return MarkAsSimd128(node), VisitUint16x8ShiftRightByScalar(node);
+ case IrOpcode::kUint16x8AddSaturate:
+ return MarkAsSimd128(node), VisitUint16x8AddSaturate(node);
+ case IrOpcode::kUint16x8SubSaturate:
+ return MarkAsSimd128(node), VisitUint16x8SubSaturate(node);
+ case IrOpcode::kUint16x8Min:
+ return MarkAsSimd128(node), VisitUint16x8Min(node);
+ case IrOpcode::kUint16x8Max:
+ return MarkAsSimd128(node), VisitUint16x8Max(node);
+ case IrOpcode::kUint16x8GreaterThan:
+ return MarkAsSimd1x8(node), VisitUint16x8GreaterThan(node);
+ case IrOpcode::kUint16x8GreaterThanOrEqual:
+ return MarkAsSimd1x8(node), VisitUint16x8GreaterThanOrEqual(node);
+ case IrOpcode::kCreateInt8x16:
+ return MarkAsSimd128(node), VisitCreateInt8x16(node);
+ case IrOpcode::kInt8x16ExtractLane:
+ return MarkAsWord32(node), VisitInt8x16ExtractLane(node);
+ case IrOpcode::kInt8x16ReplaceLane:
+ return MarkAsSimd128(node), VisitInt8x16ReplaceLane(node);
+ case IrOpcode::kInt8x16Neg:
+ return MarkAsSimd128(node), VisitInt8x16Neg(node);
+ case IrOpcode::kInt8x16ShiftLeftByScalar:
+ return MarkAsSimd128(node), VisitInt8x16ShiftLeftByScalar(node);
+ case IrOpcode::kInt8x16ShiftRightByScalar:
+ return MarkAsSimd128(node), VisitInt8x16ShiftRightByScalar(node);
+ case IrOpcode::kInt8x16Add:
+ return MarkAsSimd128(node), VisitInt8x16Add(node);
+ case IrOpcode::kInt8x16AddSaturate:
+ return MarkAsSimd128(node), VisitInt8x16AddSaturate(node);
+ case IrOpcode::kInt8x16Sub:
+ return MarkAsSimd128(node), VisitInt8x16Sub(node);
+ case IrOpcode::kInt8x16SubSaturate:
+ return MarkAsSimd128(node), VisitInt8x16SubSaturate(node);
+ case IrOpcode::kInt8x16Mul:
+ return MarkAsSimd128(node), VisitInt8x16Mul(node);
+ case IrOpcode::kInt8x16Min:
+ return MarkAsSimd128(node), VisitInt8x16Min(node);
+ case IrOpcode::kInt8x16Max:
+ return MarkAsSimd128(node), VisitInt8x16Max(node);
+ case IrOpcode::kInt8x16Equal:
+ return MarkAsSimd1x16(node), VisitInt8x16Equal(node);
+ case IrOpcode::kInt8x16NotEqual:
+ return MarkAsSimd1x16(node), VisitInt8x16NotEqual(node);
+ case IrOpcode::kInt8x16GreaterThan:
+ return MarkAsSimd1x16(node), VisitInt8x16GreaterThan(node);
+ case IrOpcode::kInt8x16GreaterThanOrEqual:
+ return MarkAsSimd1x16(node), VisitInt8x16GreaterThanOrEqual(node);
+ case IrOpcode::kUint8x16ShiftRightByScalar:
+ return MarkAsSimd128(node), VisitUint8x16ShiftRightByScalar(node);
+ case IrOpcode::kUint8x16AddSaturate:
+ return MarkAsSimd128(node), VisitUint8x16AddSaturate(node);
+ case IrOpcode::kUint8x16SubSaturate:
+ return MarkAsSimd128(node), VisitUint8x16SubSaturate(node);
+ case IrOpcode::kUint8x16Min:
+ return MarkAsSimd128(node), VisitUint8x16Min(node);
+ case IrOpcode::kUint8x16Max:
+ return MarkAsSimd128(node), VisitUint8x16Max(node);
+ case IrOpcode::kUint8x16GreaterThan:
+ return MarkAsSimd1x16(node), VisitUint8x16GreaterThan(node);
+ case IrOpcode::kUint8x16GreaterThanOrEqual:
+      return MarkAsSimd1x16(node), VisitUint8x16GreaterThanOrEqual(node);
+ case IrOpcode::kSimd128And:
+ return MarkAsSimd128(node), VisitSimd128And(node);
+ case IrOpcode::kSimd128Or:
+ return MarkAsSimd128(node), VisitSimd128Or(node);
+ case IrOpcode::kSimd128Xor:
+ return MarkAsSimd128(node), VisitSimd128Xor(node);
+ case IrOpcode::kSimd128Not:
+ return MarkAsSimd128(node), VisitSimd128Not(node);
case IrOpcode::kSimd32x4Select:
return MarkAsSimd128(node), VisitSimd32x4Select(node);
+ case IrOpcode::kSimd16x8Select:
+ return MarkAsSimd128(node), VisitSimd16x8Select(node);
+ case IrOpcode::kSimd8x16Select:
+ return MarkAsSimd128(node), VisitSimd8x16Select(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
@@ -1790,7 +1933,6 @@ void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
@@ -1874,11 +2016,229 @@ void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitInt32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4ShiftLeftByScalar(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4ShiftRightByScalar(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Min(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32x4LessThan(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4LessThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4GreaterThan(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4GreaterThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4ShiftRightByScalar(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint32x4GreaterThan(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4GreaterThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitCreateInt16x8(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8ExtractLane(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8ReplaceLane(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8ShiftLeftByScalar(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8ShiftRightByScalar(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8AddSaturate(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8SubSaturate(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8LessThan(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8LessThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8GreaterThan(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8GreaterThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8ShiftRightByScalar(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8AddSaturate(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8SubSaturate(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint16x8Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint16x8GreaterThan(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8GreaterThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitCreateInt8x16(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16ExtractLane(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16ReplaceLane(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16ShiftLeftByScalar(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16ShiftRightByScalar(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16AddSaturate(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16SubSaturate(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16LessThan(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16LessThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16GreaterThan(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16GreaterThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16ShiftRightByScalar(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16AddSaturate(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16SubSaturate(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint8x16Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint8x16GreaterThan(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16GreaterThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitSimd128And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd128Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd128Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd128Not(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitSimd32x4Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd16x8Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd8x16Select(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -2131,32 +2491,31 @@ void InstructionSelector::VisitReturn(Node* ret) {
Emit(kArchRet, 0, nullptr, input_count, value_locations);
}
-Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
- InstructionOperand output,
- InstructionOperand a,
- DeoptimizeReason reason,
- Node* frame_state) {
+Instruction* InstructionSelector::EmitDeoptimize(
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ DeoptimizeKind kind, DeoptimizeReason reason, Node* frame_state) {
size_t output_count = output.IsInvalid() ? 0 : 1;
InstructionOperand inputs[] = {a};
size_t input_count = arraysize(inputs);
return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- reason, frame_state);
+ kind, reason, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
- InstructionOperand b, DeoptimizeReason reason, Node* frame_state) {
+ InstructionOperand b, DeoptimizeKind kind, DeoptimizeReason reason,
+ Node* frame_state) {
size_t output_count = output.IsInvalid() ? 0 : 1;
InstructionOperand inputs[] = {a, b};
size_t input_count = arraysize(inputs);
return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- reason, frame_state);
+ kind, reason, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
- size_t input_count, InstructionOperand* inputs, DeoptimizeReason reason,
- Node* frame_state) {
+ size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
+ DeoptimizeReason reason, Node* frame_state) {
OperandGenerator g(this);
FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
InstructionOperandVector args(instruction_zone());
@@ -2165,7 +2524,8 @@ Instruction* InstructionSelector::EmitDeoptimize(
args.push_back(inputs[i]);
}
opcode |= MiscField::encode(static_cast<int>(input_count));
- int const state_id = sequence()->AddDeoptimizationEntry(descriptor, reason);
+ int const state_id =
+ sequence()->AddDeoptimizationEntry(descriptor, kind, reason);
args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
@@ -2184,16 +2544,7 @@ void InstructionSelector::EmitIdentity(Node* node) {
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
DeoptimizeReason reason,
Node* value) {
- InstructionCode opcode = kArchDeoptimize;
- switch (kind) {
- case DeoptimizeKind::kEager:
- opcode |= MiscField::encode(Deoptimizer::EAGER);
- break;
- case DeoptimizeKind::kSoft:
- opcode |= MiscField::encode(Deoptimizer::SOFT);
- break;
- }
- EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, reason, value);
+ EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason, value);
}
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index b7753ce7b7..d811aa4741 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -114,14 +114,15 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
// ===========================================================================
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
- InstructionOperand a, DeoptimizeReason reason,
- Node* frame_state);
+ InstructionOperand a, DeoptimizeKind kind,
+ DeoptimizeReason reason, Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
- DeoptimizeReason reason, Node* frame_state);
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
- InstructionOperand* inputs,
+ InstructionOperand* inputs, DeoptimizeKind kind,
DeoptimizeReason reason, Node* frame_state);
// ===========================================================================
@@ -262,6 +263,27 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void MarkAsSimd128(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
}
+ void MarkAsSimd1x4(Node* node) {
+ if (kSimdMaskRegisters) {
+ MarkAsRepresentation(MachineRepresentation::kSimd1x4, node);
+ } else {
+ MarkAsSimd128(node);
+ }
+ }
+ void MarkAsSimd1x8(Node* node) {
+ if (kSimdMaskRegisters) {
+ MarkAsRepresentation(MachineRepresentation::kSimd1x8, node);
+ } else {
+ MarkAsSimd128(node);
+ }
+ }
+ void MarkAsSimd1x16(Node* node) {
+ if (kSimdMaskRegisters) {
+ MarkAsRepresentation(MachineRepresentation::kSimd1x16, node);
+ } else {
+ MarkAsSimd128(node);
+ }
+ }
void MarkAsReference(Node* node) {
MarkAsRepresentation(MachineRepresentation::kTagged, node);
}
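The MarkAsSimd1x{4,8,16} helpers added above encode a portability decision: targets with dedicated SIMD mask registers track the narrow 1-bit-per-lane representations, while everything else falls back to a full 128-bit register. A trivial standalone sketch of that fallback (kSimdMaskRegisters here is a stand-in value, not V8's per-target constant):

    #include <cstdio>

    constexpr bool kSimdMaskRegisters = false;  // stand-in for this sketch

    const char* MaskRepresentation(const char* narrow_rep) {
      return kSimdMaskRegisters ? narrow_rep : "simd128";
    }

    int main() {
      std::printf("%s\n", MaskRepresentation("simd1x4"));  // prints "simd128"
    }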
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index c4560b6e76..1067d2030a 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/instruction.h"
+
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/instruction.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
+#include "src/source-position.h"
namespace v8 {
namespace internal {
@@ -208,6 +210,15 @@ std::ostream& operator<<(std::ostream& os,
case MachineRepresentation::kSimd128:
os << "|s128";
break;
+ case MachineRepresentation::kSimd1x4:
+ os << "|s1x4";
+ break;
+ case MachineRepresentation::kSimd1x8:
+ os << "|s1x8";
+ break;
+ case MachineRepresentation::kSimd1x16:
+ os << "|s1x16";
+ break;
case MachineRepresentation::kTaggedSigned:
os << "|ts";
break;
@@ -888,6 +899,9 @@ static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
@@ -926,9 +940,11 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
}
int InstructionSequence::AddDeoptimizationEntry(
- FrameStateDescriptor* descriptor, DeoptimizeReason reason) {
+ FrameStateDescriptor* descriptor, DeoptimizeKind kind,
+ DeoptimizeReason reason) {
int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
- deoptimization_entries_.push_back(DeoptimizationEntry(descriptor, reason));
+ deoptimization_entries_.push_back(
+ DeoptimizationEntry(descriptor, kind, reason));
return deoptimization_id;
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index d62ffc43bd..ee7865dec0 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -484,6 +484,9 @@ class LocationOperand : public InstructionOperand {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
@@ -1122,6 +1125,7 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant);
class FrameStateDescriptor;
enum class StateValueKind : uint8_t {
+ kArguments,
kPlain,
kOptimizedOut,
kNested,
@@ -1135,6 +1139,10 @@ class StateValueDescriptor {
type_(MachineType::AnyTagged()),
id_(0) {}
+ static StateValueDescriptor Arguments() {
+ return StateValueDescriptor(StateValueKind::kArguments,
+ MachineType::AnyTagged(), 0);
+ }
static StateValueDescriptor Plain(MachineType type) {
return StateValueDescriptor(StateValueKind::kPlain, type, 0);
}
@@ -1151,10 +1159,11 @@ class StateValueDescriptor {
MachineType::AnyTagged(), id);
}
- int IsPlain() { return kind_ == StateValueKind::kPlain; }
- int IsOptimizedOut() { return kind_ == StateValueKind::kOptimizedOut; }
- int IsNested() { return kind_ == StateValueKind::kNested; }
- int IsDuplicate() { return kind_ == StateValueKind::kDuplicate; }
+ bool IsArguments() const { return kind_ == StateValueKind::kArguments; }
+ bool IsPlain() const { return kind_ == StateValueKind::kPlain; }
+ bool IsOptimizedOut() const { return kind_ == StateValueKind::kOptimizedOut; }
+ bool IsNested() const { return kind_ == StateValueKind::kNested; }
+ bool IsDuplicate() const { return kind_ == StateValueKind::kDuplicate; }
MachineType type() const { return type_; }
size_t id() const { return id_; }
@@ -1223,6 +1232,7 @@ class StateValueList {
nested_.push_back(nested);
return nested;
}
+ void PushArguments() { fields_.push_back(StateValueDescriptor::Arguments()); }
void PushDuplicate(size_t id) {
fields_.push_back(StateValueDescriptor::Duplicate(id));
}
@@ -1289,14 +1299,17 @@ class FrameStateDescriptor : public ZoneObject {
class DeoptimizationEntry final {
public:
DeoptimizationEntry() {}
- DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeReason reason)
- : descriptor_(descriptor), reason_(reason) {}
+ DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
+ DeoptimizeReason reason)
+ : descriptor_(descriptor), kind_(kind), reason_(reason) {}
FrameStateDescriptor* descriptor() const { return descriptor_; }
+ DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
private:
FrameStateDescriptor* descriptor_ = nullptr;
+ DeoptimizeKind kind_ = DeoptimizeKind::kEager;
DeoptimizeReason reason_ = DeoptimizeReason::kNoReason;
};
@@ -1556,7 +1569,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final
}
int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
- DeoptimizeReason reason);
+ DeoptimizeKind kind, DeoptimizeReason reason);
DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
int GetDeoptimizationEntryCount() const {
return static_cast<int>(deoptimization_entries_.size());
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index ff61aa765d..06c927289e 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -12,6 +12,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
+#include "src/objects-inl.h"
#include "src/wasm/wasm-module.h"
#include "src/zone/zone.h"
@@ -237,7 +238,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, store_op);
ReplaceNode(node, node, high_node);
} else {
- DefaultLowering(node);
+ DefaultLowering(node, true);
}
break;
}
@@ -826,7 +827,7 @@ void Int64Lowering::LowerComparison(Node* node, const Operator* high_word_op,
ReplaceNode(node, replacement, nullptr);
}
-bool Int64Lowering::DefaultLowering(Node* node) {
+bool Int64Lowering::DefaultLowering(Node* node, bool low_word_only) {
bool something_changed = false;
for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
Node* input = node->InputAt(i);
@@ -834,7 +835,7 @@ bool Int64Lowering::DefaultLowering(Node* node) {
something_changed = true;
node->ReplaceInput(i, GetReplacementLow(input));
}
- if (HasReplacementHigh(input)) {
+ if (!low_word_only && HasReplacementHigh(input)) {
something_changed = true;
node->InsertInput(zone(), i + 1, GetReplacementHigh(input));
}
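The new low_word_only parameter appears to matter for lowered 64-bit stores: the low word replaces the value input in place while the high word is written through a separate store operator, so DefaultLowering must not also splice the high word into the input list. A standalone sketch of the word split that this lowering assumes on 32-bit targets (little-endian low/high pairs):

    #include <cassert>
    #include <cstdint>

    struct Pair32 { uint32_t low, high; };

    Pair32 Split(uint64_t v) {
      return {static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};
    }

    int main() {
      Pair32 p = Split(0x0123456789ABCDEFull);
      assert(p.low == 0x89ABCDEFu);   // stored at the base address
      assert(p.high == 0x01234567u);  // stored at base + 4
    }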
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 66a54e9c3f..811c2b2046 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -47,7 +47,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
void PrepareReplacements(Node* node);
void PushNode(Node* node);
void LowerNode(Node* node);
- bool DefaultLowering(Node* node);
+ bool DefaultLowering(Node* node, bool low_word_only = false);
void LowerComparison(Node* node, const Operator* signed_op,
const Operator* unsigned_op);
void PrepareProjectionReplacements(Node* node);
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index ec1b01a2a1..24eb5cea8e 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -5,9 +5,11 @@
#include "src/compiler/js-builtin-reducer.h"
#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -19,17 +21,16 @@ namespace v8 {
namespace internal {
namespace compiler {
-
-// Helper class to access JSCallFunction nodes that are potential candidates
+// Helper class to access JSCall nodes that are potential candidates
// for reduction when they have a BuiltinFunctionId associated with them.
class JSCallReduction {
public:
explicit JSCallReduction(Node* node) : node_(node) {}
- // Determines whether the node is a JSCallFunction operation that targets a
+ // Determines whether the node is a JSCall operation that targets a
// constant callee being a well-known builtin with a BuiltinFunctionId.
bool HasBuiltinFunctionId() {
- if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
+ if (node_->opcode() != IrOpcode::kJSCall) return false;
HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
if (!m.HasValue() || !m.Value()->IsJSFunction()) return false;
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
@@ -38,7 +39,7 @@ class JSCallReduction {
// Retrieves the BuiltinFunctionId as described above.
BuiltinFunctionId GetBuiltinFunctionId() {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+ DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
return function->shared()->builtin_function_id();
@@ -79,13 +80,13 @@ class JSCallReduction {
Node* right() { return GetJSCallInput(1); }
int GetJSCallArity() {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+ DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
// Skip first (i.e. callee) and second (i.e. receiver) operand.
return node_->op()->ValueInputCount() - 2;
}
Node* GetJSCallInput(int index) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+ DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
DCHECK_LT(index, GetJSCallArity());
// Skip first (i.e. callee) and second (i.e. receiver) operand.
return NodeProperties::GetValueInput(node_, index + 2);
@@ -108,38 +109,14 @@ JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph,
namespace {
-// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
-// alias analyzer?
-bool IsSame(Node* a, Node* b) {
- if (a == b) {
- return true;
- } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
- return IsSame(a->InputAt(0), b);
- } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
- return IsSame(a, b->InputAt(0));
- }
- return false;
-}
-
MaybeHandle<Map> GetMapWitness(Node* node) {
+ ZoneHandleSet<Map> maps;
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
- // Check if the {node} is dominated by a CheckMaps with a single map
- // for the {receiver}, and if so use that map for the lowering below.
- for (Node* dominator = effect;;) {
- if (dominator->opcode() == IrOpcode::kCheckMaps &&
- IsSame(dominator->InputAt(0), receiver)) {
- ZoneHandleSet<Map> const& maps =
- CheckMapsParametersOf(dominator->op()).maps();
- return (maps.size() == 1) ? MaybeHandle<Map>(maps[0])
- : MaybeHandle<Map>();
- }
- if (dominator->op()->EffectInputCount() != 1) {
- // Didn't find any appropriate CheckMaps node.
- return MaybeHandle<Map>();
- }
- dominator = NodeProperties::GetEffectInput(dominator);
+ if (NodeProperties::InferReceiverMaps(receiver, effect, &maps)) {
+ if (maps.size() == 1) return MaybeHandle<Map>(maps[0]);
}
+ return MaybeHandle<Map>();
}
// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
@@ -838,20 +815,42 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
// ES6 section 22.1.3.18 Array.prototype.push ( )
Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
- Handle<Map> receiver_map;
// We need exactly target, receiver and value parameters.
if (node->op()->ValueInputCount() != 3) return NoChange();
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* value = NodeProperties::GetValueInput(node, 2);
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
- CanInlineArrayResizeOperation(receiver_map)) {
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (receiver_maps.size() != 1) return NoChange();
+ DCHECK_NE(NodeProperties::kNoReceiverMaps, result);
+
+ // TODO(turbofan): Relax this to deal with multiple {receiver} maps.
+ Handle<Map> receiver_map = receiver_maps[0];
+ if (CanInlineArrayResizeOperation(receiver_map)) {
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->array_protector());
dependencies()->AssumePrototypeMapsStable(receiver_map);
+ // If the {receiver_maps} information is not reliable, we need
+ // to check that the {receiver} still has one of these maps.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ if (receiver_map->is_stable()) {
+ dependencies()->AssumeMapStable(receiver_map);
+ } else {
+ // TODO(turbofan): This is a potential - yet unlikely - deoptimization
+ // loop, since we might not learn from this deoptimization in baseline
+ // code. We need a way to learn from deoptimizations in optimized code
+ // to address these problems.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps),
+ receiver, effect, control);
+ }
+ }
+
// TODO(turbofan): Perform type checks on the {value}. We are not guaranteed
// to learn from these checks in case they fail, as the witness (i.e. the
// map check from the LoadIC for a.push) might not be executed in baseline
@@ -915,7 +914,7 @@ bool HasInstanceTypeWitness(Node* receiver, Node* effect,
InstanceType instance_type) {
for (Node* dominator = effect;;) {
if (dominator->opcode() == IrOpcode::kCheckMaps &&
- IsSame(dominator->InputAt(0), receiver)) {
+ NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
ZoneHandleSet<Map> const& maps =
CheckMapsParametersOf(dominator->op()).maps();
// Check if all maps have the given {instance_type}.
@@ -924,27 +923,15 @@ bool HasInstanceTypeWitness(Node* receiver, Node* effect,
}
return true;
}
- switch (dominator->opcode()) {
- case IrOpcode::kStoreField: {
- FieldAccess const& access = FieldAccessOf(dominator->op());
- if (access.base_is_tagged == kTaggedBase &&
- access.offset == HeapObject::kMapOffset) {
- return false;
- }
- break;
- }
- case IrOpcode::kStoreElement:
- case IrOpcode::kStoreTypedElement:
- break;
- default: {
- DCHECK_EQ(1, dominator->op()->EffectOutputCount());
- if (dominator->op()->EffectInputCount() != 1 ||
- !dominator->op()->HasProperty(Operator::kNoWrite)) {
- // Didn't find any appropriate CheckMaps node.
- return false;
- }
- break;
- }
+ // The instance type doesn't change for JSReceiver values, so we
+ // don't need to pay attention to potentially side-effecting nodes
+ // here. Strings and internal structures like FixedArray and
+ // FixedDoubleArray behave differently, but this function is not used for them.
+ DCHECK_LE(FIRST_JS_RECEIVER_TYPE, instance_type);
+ DCHECK_EQ(1, dominator->op()->EffectOutputCount());
+ if (dominator->op()->EffectInputCount() != 1) {
+ // Didn't find any appropriate CheckMaps node.
+ return false;
}
dominator = NodeProperties::GetEffectInput(dominator);
}
@@ -1622,7 +1609,7 @@ Node* GetStringWitness(Node* node) {
// the lowering below.
for (Node* dominator = effect;;) {
if (dominator->opcode() == IrOpcode::kCheckString &&
- IsSame(dominator->InputAt(0), receiver)) {
+ NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
return dominator;
}
if (dominator->op()->EffectInputCount() != 1) {
@@ -1743,6 +1730,34 @@ Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
return NoChange();
}
+// ES6 String.prototype.indexOf(searchString [, position])
+// #sec-string.prototype.indexof
+Reduction JSBuiltinReducer::ReduceStringIndexOf(Node* node) {
+ // We need at least target, receiver and search_string parameters.
+ if (node->op()->ValueInputCount() >= 3) {
+ Node* search_string = NodeProperties::GetValueInput(node, 2);
+ Type* search_string_type = NodeProperties::GetType(search_string);
+ Node* position = (node->op()->ValueInputCount() >= 4)
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->ZeroConstant();
+ Type* position_type = NodeProperties::GetType(position);
+
+ if (search_string_type->Is(Type::String()) &&
+ position_type->Is(Type::SignedSmall())) {
+ if (Node* receiver = GetStringWitness(node)) {
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, receiver);
+ node->ReplaceInput(1, search_string);
+ node->ReplaceInput(2, position);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node, simplified()->StringIndexOf());
+ return Changed(node);
+ }
+ }
+ }
+ return NoChange();
+}
+
Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
if (Node* receiver = GetStringWitness(node)) {
Node* effect = NodeProperties::GetEffectInput(node);
@@ -2114,6 +2129,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceStringCharAt(node);
case kStringCharCodeAt:
return ReduceStringCharCodeAt(node);
+ case kStringIndexOf:
+ return ReduceStringIndexOf(node);
case kStringIterator:
return ReduceStringIterator(node);
case kStringIteratorNext:
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 295da8d1bc..6ff06e3bf3 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -103,6 +103,7 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceStringCharAt(Node* node);
Reduction ReduceStringCharCodeAt(Node* node);
Reduction ReduceStringFromCharCode(Node* node);
+ Reduction ReduceStringIndexOf(Node* node);
Reduction ReduceStringIterator(Node* node);
Reduction ReduceStringIteratorNext(Node* node);
Reduction ReduceArrayBufferViewAccessor(Node* node,
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index eaecf8fc5f..c0deb915f8 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -6,6 +6,7 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
+#include "src/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -19,10 +20,14 @@ namespace compiler {
Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
- case IrOpcode::kJSCallConstruct:
- return ReduceJSCallConstruct(node);
- case IrOpcode::kJSCallFunction:
- return ReduceJSCallFunction(node);
+ case IrOpcode::kJSConstruct:
+ return ReduceJSConstruct(node);
+ case IrOpcode::kJSConstructWithSpread:
+ return ReduceJSConstructWithSpread(node);
+ case IrOpcode::kJSCall:
+ return ReduceJSCall(node);
+ case IrOpcode::kJSCallWithSpread:
+ return ReduceJSCallWithSpread(node);
default:
break;
}
@@ -32,9 +37,9 @@ Reduction JSCallReducer::Reduce(Node* node) {
// ES6 section 22.1.1 The Array Constructor
Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
Node* target = NodeProperties::GetValueInput(node, 0);
- CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ CallParameters const& p = CallParametersOf(node->op());
// Check if we have an allocation site from the CallIC.
Handle<AllocationSite> site;
@@ -61,8 +66,8 @@ Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
// ES6 section 20.1.1 The Number Constructor
Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
- CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
// Turn the {node} into a {JSToNumber} call.
DCHECK_LE(2u, p.arity());
@@ -76,9 +81,13 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
// ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
Node* target = NodeProperties::GetValueInput(node, 0);
- CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ CallParameters const& p = CallParametersOf(node->op());
+ // Tail calls to Function.prototype.apply are not properly supported
+ // down the pipeline, so we disable this optimization completely for
+ // tail calls (for now).
+ if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
Handle<JSFunction> apply =
Handle<JSFunction>::cast(HeapObjectMatcher(target).Value());
size_t arity = p.arity();
@@ -104,35 +113,65 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
if (edge.from() == node) continue;
return NoChange();
}
+ // Check if the arguments can be handled in the fast case (i.e. we don't
+ // have aliased sloppy arguments), and compute the {start_index} for
+ // rest parameters.
+ CreateArgumentsType const type = CreateArgumentsTypeOf(arg_array->op());
+ Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int formal_parameter_count;
+ int start_index = 0;
+ {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ formal_parameter_count = shared->internal_formal_parameter_count();
+ }
+ if (type == CreateArgumentsType::kMappedArguments) {
+ // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
+ if (formal_parameter_count != 0) return NoChange();
+ } else if (type == CreateArgumentsType::kRestParameter) {
+ start_index = formal_parameter_count;
+ }
+ // Check if we are applying to inlined arguments or to the arguments of
+ // the outermost function.
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ // TODO(jarin,bmeurer): Support the NewUnmappedArgumentsElement and
+ // NewRestParameterElements in the EscapeAnalysis and Deoptimizer
+ // instead, then we don't need this hack.
+ // Only works with zero formal parameters because of lacking deoptimizer
+ // support.
+ if (type != CreateArgumentsType::kRestParameter &&
+ formal_parameter_count == 0) {
+ // There are no other uses of the {arg_array} except in StateValues,
+ // so we just replace {arg_array} with a marker for the Deoptimizer
+ // that this refers to the arguments object.
+ Node* arguments = graph()->NewNode(common()->ArgumentsObjectState());
+ ReplaceWithValue(arg_array, arguments);
+ }
+
+ // Reduce {node} to a JSCallForwardVarargs operation, which just
+ // re-pushes the incoming arguments and calls the {target}.
+ node->RemoveInput(0); // Function.prototype.apply
+ node->RemoveInput(2); // arguments
+ NodeProperties::ChangeOp(node, javascript()->CallForwardVarargs(
+ start_index, p.tail_call_mode()));
+ return Changed(node);
+ }
// Get to the actual frame state from which to extract the arguments;
// we can only optimize this in case the {node} was already inlined into
// some other function (and same for the {arg_array}).
- CreateArgumentsType type = CreateArgumentsTypeOf(arg_array->op());
- Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
- Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
// Need to take the parameters from the arguments adaptor.
frame_state = outer_state;
}
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
- int start_index = 0;
- if (type == CreateArgumentsType::kMappedArguments) {
- // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- if (shared->internal_formal_parameter_count() != 0) return NoChange();
- } else if (type == CreateArgumentsType::kRestParameter) {
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- start_index = shared->internal_formal_parameter_count();
- }
// Remove the argArray input from the {node}.
node->RemoveInput(static_cast<int>(--arity));
- // Add the actual parameters to the {node}, skipping the receiver.
+ // Add the actual parameters to the {node}, skipping the receiver,
+ // starting from {start_index}.
Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
+ for (int i = start_index + 1; i < parameters->InputCount(); ++i) {
node->InsertInput(graph()->zone(), static_cast<int>(arity),
parameters->InputAt(i));
++arity;
@@ -143,24 +182,25 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
} else {
return NoChange();
}
- // Change {node} to the new {JSCallFunction} operator.
+ // Change {node} to the new {JSCall} operator.
NodeProperties::ChangeOp(
- node, javascript()->CallFunction(arity, p.frequency(), VectorSlotPair(),
- convert_mode, p.tail_call_mode()));
+ node,
+ javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode,
+ p.tail_call_mode()));
// Change context of {node} to the Function.prototype.apply context,
// to ensure any exception is thrown in the correct context.
NodeProperties::ReplaceContextInput(
node, jsgraph()->HeapConstant(handle(apply->context(), isolate())));
- // Try to further reduce the JSCallFunction {node}.
- Reduction const reduction = ReduceJSCallFunction(node);
+ // Try to further reduce the JSCall {node}.
+ Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
}
// ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
- CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
Handle<JSFunction> call = Handle<JSFunction>::cast(
HeapObjectMatcher(NodeProperties::GetValueInput(node, 0)).Value());
// Change context of {node} to the Function.prototype.call context,
@@ -185,16 +225,17 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
--arity;
}
NodeProperties::ChangeOp(
- node, javascript()->CallFunction(arity, p.frequency(), VectorSlotPair(),
- convert_mode, p.tail_call_mode()));
- // Try to further reduce the JSCallFunction {node}.
- Reduction const reduction = ReduceJSCallFunction(node);
+ node,
+ javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode,
+ p.tail_call_mode()));
+ // Try to further reduce the JSCall {node}.
+ Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
}
// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] (V)
Reduction JSCallReducer::ReduceFunctionPrototypeHasInstance(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* object = (node->op()->ValueInputCount() >= 3)
? NodeProperties::GetValueInput(node, 2)
@@ -223,51 +264,14 @@ Reduction JSCallReducer::ReduceFunctionPrototypeHasInstance(Node* node) {
namespace {
-// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
-// alias analyzer?
-bool IsSame(Node* a, Node* b) {
- if (a == b) {
- return true;
- } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
- return IsSame(a->InputAt(0), b);
- } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
- return IsSame(a, b->InputAt(0));
- }
- return false;
-}
-
-// TODO(turbofan): Share with similar functionality in JSInliningHeuristic
-// and JSNativeContextSpecialization, i.e. move to NodeProperties helper?!
-MaybeHandle<Map> InferReceiverMap(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- // Check if the {node} is dominated by a CheckMaps with a single map
- // for the {receiver}, and if so use that map for the lowering below.
- for (Node* dominator = effect;;) {
- if (dominator->opcode() == IrOpcode::kCheckMaps &&
- IsSame(dominator->InputAt(0), receiver)) {
- if (dominator->op()->ValueInputCount() == 2) {
- HeapObjectMatcher m(dominator->InputAt(1));
- if (m.HasValue()) return Handle<Map>::cast(m.Value());
- }
- return MaybeHandle<Map>();
- }
- if (dominator->op()->EffectInputCount() != 1) {
- // Didn't find any appropriate CheckMaps node.
- return MaybeHandle<Map>();
- }
- dominator = NodeProperties::GetEffectInput(dominator);
- }
-}
-
bool CanInlineApiCall(Isolate* isolate, Node* node,
Handle<FunctionTemplateInfo> function_template_info) {
- DCHECK(node->opcode() == IrOpcode::kJSCallFunction);
+ DCHECK(node->opcode() == IrOpcode::kJSCall);
if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
if (function_template_info->call_code()->IsUndefined(isolate)) {
return false;
}
- CallFunctionParameters const& params = CallFunctionParametersOf(node->op());
+ CallParameters const& params = CallParametersOf(node->op());
// CallApiCallbackStub expects the target in a register, so we count it out,
// and it counts the receiver as an implicit argument, so we count the receiver
// out too.
@@ -315,20 +319,33 @@ JSCallReducer::HolderLookup JSCallReducer::LookupHolder(
// ES6 section B.2.2.1.1 get Object.prototype.__proto__
Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
// Try to determine the {receiver} map.
- Handle<Map> receiver_map;
- if (InferReceiverMap(node).ToHandle(&receiver_map)) {
- // Check if we can constant-fold the {receiver} map.
- if (!receiver_map->IsJSProxyMap() &&
- !receiver_map->has_hidden_prototype() &&
- !receiver_map->is_access_check_needed()) {
- Handle<Object> receiver_prototype(receiver_map->prototype(), isolate());
- Node* value = jsgraph()->Constant(receiver_prototype);
- ReplaceWithValue(node, value);
- return Replace(value);
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kReliableReceiverMaps) {
+ Handle<Map> candidate_map(
+ receiver_maps[0]->GetPrototypeChainRootMap(isolate()));
+ Handle<Object> candidate_prototype(candidate_map->prototype(), isolate());
+
+ // Check if we can constant-fold the {candidate_prototype}.
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ Handle<Map> const receiver_map(
+ receiver_maps[i]->GetPrototypeChainRootMap(isolate()));
+ if (receiver_map->IsJSProxyMap() ||
+ receiver_map->has_hidden_prototype() ||
+ receiver_map->is_access_check_needed() ||
+ receiver_map->prototype() != *candidate_prototype) {
+ return NoChange();
+ }
}
+ Node* value = jsgraph()->Constant(candidate_prototype);
+ ReplaceWithValue(node, value);
+ return Replace(value);
}
return NoChange();
@@ -349,7 +366,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Handle<Object> data(call_handler_info->data(), isolate);
Node* receiver_node = NodeProperties::GetValueInput(node, 1);
- CallFunctionParameters const& params = CallFunctionParametersOf(node->op());
+ CallParameters const& params = CallParametersOf(node->op());
Handle<HeapObject> receiver = HeapObjectMatcher(receiver_node).Value();
bool const receiver_is_undefined = receiver->IsUndefined(isolate);
@@ -397,14 +414,87 @@ Reduction JSCallReducer::ReduceCallApiFunction(
return Changed(node);
}
-Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
- CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
+ DCHECK(node->opcode() == IrOpcode::kJSCallWithSpread ||
+ node->opcode() == IrOpcode::kJSConstructWithSpread);
+
+ // Do a check to make sure we can actually avoid iteration.
+ if (!isolate()->initial_array_iterator_prototype_map()->is_stable()) {
+ return NoChange();
+ }
+
+ Node* spread = NodeProperties::GetValueInput(node, arity);
+
+ // Check if spread is an arguments object, and {node} is the only value user
+ // of spread (except for value uses in frame states).
+ if (spread->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
+ for (Edge edge : spread->use_edges()) {
+ if (edge.from()->opcode() == IrOpcode::kStateValues) continue;
+ if (!NodeProperties::IsValueEdge(edge)) continue;
+ if (edge.from() == node) continue;
+ return NoChange();
+ }
+
+ // Get to the actual frame state from which to extract the arguments;
+ // we can only optimize this in case the {node} was already inlined into
+ // some other function (and same for the {spread}).
+ CreateArgumentsType type = CreateArgumentsTypeOf(spread->op());
+ Node* frame_state = NodeProperties::GetFrameStateInput(spread);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
+ FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
+ if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
+ // Need to take the parameters from the arguments adaptor.
+ frame_state = outer_state;
+ }
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int start_index = 0;
+ if (type == CreateArgumentsType::kMappedArguments) {
+ // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ if (shared->internal_formal_parameter_count() != 0) return NoChange();
+ } else if (type == CreateArgumentsType::kRestParameter) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ start_index = shared->internal_formal_parameter_count();
+
+ // Only check the array iterator protector when we have a rest object.
+ if (!isolate()->IsArrayIteratorLookupChainIntact()) return NoChange();
+ // Add a code dependency on the array iterator protector.
+ dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
+ }
+
+ dependencies()->AssumeMapStable(
+ isolate()->initial_array_iterator_prototype_map());
+
+ node->RemoveInput(arity--);
+
+ // Add the actual parameters to the {node}, skipping the receiver.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
+ node->InsertInput(graph()->zone(), static_cast<int>(++arity),
+ parameters->InputAt(i));
+ }
+
+ if (node->opcode() == IrOpcode::kJSCallWithSpread) {
+ NodeProperties::ChangeOp(
+ node, javascript()->Call(arity + 1, 7, VectorSlotPair()));
+ } else {
+ NodeProperties::ChangeOp(
+ node, javascript()->Construct(arity + 2, 7, VectorSlotPair()));
+ }
+ return Changed(node);
+}
+
+Reduction JSCallReducer::ReduceJSCall(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
Node* target = NodeProperties::GetValueInput(node, 0);
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
- // Try to specialize JSCallFunction {node}s with constant {target}s.
+ // Try to specialize JSCall {node}s with constant {target}s.
HeapObjectMatcher m(target);
if (m.HasValue()) {
if (m.Value()->IsJSFunction()) {
@@ -420,6 +510,9 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
return Changed(node);
}
+ // Don't inline cross native context.
+ if (function->native_context() != *native_context()) return NoChange();
+
// Check for known builtin functions.
switch (shared->code()->builtin_index()) {
case Builtins::kFunctionPrototypeApply:
@@ -454,7 +547,7 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
Handle<Object> bound_this(function->bound_this(), isolate());
Handle<FixedArray> bound_arguments(function->bound_arguments(),
isolate());
- CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ CallParameters const& p = CallParametersOf(node->op());
ConvertReceiverMode const convert_mode =
(bound_this->IsNullOrUndefined(isolate()))
? ConvertReceiverMode::kNullOrUndefined
@@ -473,11 +566,12 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
arity++;
}
- NodeProperties::ChangeOp(node, javascript()->CallFunction(
- arity, p.frequency(), VectorSlotPair(),
- convert_mode, p.tail_call_mode()));
- // Try to further reduce the JSCallFunction {node}.
- Reduction const reduction = ReduceJSCallFunction(node);
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->Call(arity, p.frequency(), VectorSlotPair(),
+ convert_mode, p.tail_call_mode()));
+ // Try to further reduce the JSCall {node}.
+ Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -495,8 +589,7 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
// Insert a CallIC here to collect feedback for uninitialized calls.
int const arg_count = static_cast<int>(p.arity() - 2);
- Callable callable =
- CodeFactory::CallICInOptimizedCode(isolate(), p.convert_mode());
+ Callable callable = CodeFactory::CallIC(isolate(), p.convert_mode());
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), arg_count + 1,
@@ -544,22 +637,30 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
effect =
graph()->NewNode(simplified()->CheckIf(), check, effect, control);
- // Specialize the JSCallFunction node to the {target_function}.
+ // Specialize the JSCall node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
NodeProperties::ReplaceEffectInput(node, effect);
- // Try to further reduce the JSCallFunction {node}.
- Reduction const reduction = ReduceJSCallFunction(node);
+ // Try to further reduce the JSCall {node}.
+ Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
}
}
return NoChange();
}
+Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallWithSpread, node->opcode());
+ CallWithSpreadParameters const& p = CallWithSpreadParametersOf(node->op());
+ DCHECK_LE(3u, p.arity());
+ int arity = static_cast<int>(p.arity() - 1);
+
+ return ReduceSpreadCall(node, arity);
+}
-Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
- CallConstructParameters const& p = CallConstructParametersOf(node->op());
+Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
+ ConstructParameters const& p = ConstructParametersOf(node->op());
DCHECK_LE(2u, p.arity());
int const arity = static_cast<int>(p.arity() - 2);
Node* target = NodeProperties::GetValueInput(node, 0);
@@ -567,7 +668,7 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Try to specialize JSCallConstruct {node}s with constant {target}s.
+ // Try to specialize JSConstruct {node}s with constant {target}s.
HeapObjectMatcher m(target);
if (m.HasValue()) {
if (m.Value()->IsJSFunction()) {
@@ -582,6 +683,9 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
return Changed(node);
}
+ // Don't inline cross native context.
+ if (function->native_context() != *native_context()) return NoChange();
+
// Check for the ArrayConstructor.
if (*function == function->native_context()->array_function()) {
// Check if we have an allocation site.
@@ -653,15 +757,15 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
effect =
graph()->NewNode(simplified()->CheckIf(), check, effect, control);
- // Specialize the JSCallConstruct node to the {target_function}.
+ // Specialize the JSConstruct node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
NodeProperties::ReplaceEffectInput(node, effect);
if (target == new_target) {
NodeProperties::ReplaceValueInput(node, target_function, arity + 1);
}
- // Try to further reduce the JSCallConstruct {node}.
- Reduction const reduction = ReduceJSCallConstruct(node);
+ // Try to further reduce the JSConstruct {node}.
+ Reduction const reduction = ReduceJSConstruct(node);
return reduction.Changed() ? reduction : Changed(node);
}
}
@@ -669,10 +773,22 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
return NoChange();
}
+Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConstructWithSpread, node->opcode());
+ ConstructWithSpreadParameters const& p =
+ ConstructWithSpreadParametersOf(node->op());
+ DCHECK_LE(3u, p.arity());
+ int arity = static_cast<int>(p.arity() - 2);
+
+ return ReduceSpreadCall(node, arity);
+}
+
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
+Factory* JSCallReducer::factory() const { return isolate()->factory(); }
+
CommonOperatorBuilder* JSCallReducer::common() const {
return jsgraph()->common();
}
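The spread reduction can be modeled with a small standalone sketch (not V8 code; types heavily simplified): once the reducer has proven that the spread source is the caller's own arguments object and that its iteration cannot be observed, the call is flattened into a direct call that re-pushes the recorded frame parameters, skipping the receiver and, for rest parameters, the formals before start_index.

#include <cstddef>
#include <functional>
#include <vector>

// Toy model of ReduceSpreadCall: f(...arguments) becomes a direct call
// whose argument list is copied out of the (simulated) frame state.
int CallWithFlattenedSpread(
    const std::function<int(const std::vector<int>&)>& f,
    const std::vector<int>& frame_parameters,  // receiver at index 0
    int start_index) {                         // 0, or the formal count for rest
  std::vector<int> args;
  // Skip the receiver and the first start_index formals, matching the
  // reducer's loop from start_index + 1 over the frame state parameters.
  for (std::size_t i = static_cast<std::size_t>(start_index) + 1;
       i < frame_parameters.size(); ++i) {
    args.push_back(frame_parameters[i]);
  }
  return f(args);  // direct call; no iterator protocol involved
}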
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index e39433a020..10b8ee8992 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -10,6 +10,11 @@
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+
namespace compiler {
// Forward declarations.
@@ -18,7 +23,7 @@ class JSGraph;
class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
-// Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
+// Performs strength reduction on {JSConstruct} and {JSCall} nodes,
// which might allow inlining or other optimizations to be performed afterwards.
class JSCallReducer final : public AdvancedReducer {
public:
@@ -30,11 +35,13 @@ class JSCallReducer final : public AdvancedReducer {
typedef base::Flags<Flag> Flags;
JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
- Handle<Context> native_context)
+ Handle<Context> native_context,
+ CompilationDependencies* dependencies)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
flags_(flags),
- native_context_(native_context) {}
+ native_context_(native_context),
+ dependencies_(dependencies) {}
Reduction Reduce(Node* node) final;
@@ -48,8 +55,11 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceFunctionPrototypeCall(Node* node);
Reduction ReduceFunctionPrototypeHasInstance(Node* node);
Reduction ReduceObjectPrototypeGetProto(Node* node);
- Reduction ReduceJSCallConstruct(Node* node);
- Reduction ReduceJSCallFunction(Node* node);
+ Reduction ReduceSpreadCall(Node* node, int arity);
+ Reduction ReduceJSConstruct(Node* node);
+ Reduction ReduceJSConstructWithSpread(Node* node);
+ Reduction ReduceJSCall(Node* node);
+ Reduction ReduceJSCallWithSpread(Node* node);
enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
@@ -61,14 +71,17 @@ class JSCallReducer final : public AdvancedReducer {
Flags flags() const { return flags_; }
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
+ Factory* factory() const;
Handle<Context> native_context() const { return native_context_; }
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
+ CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
Flags const flags_;
Handle<Context> const native_context_;
+ CompilationDependencies* const dependencies_;
};
DEFINE_OPERATORS_FOR_FLAGS(JSCallReducer::Flags)
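The new CompilationDependencies parameter follows the usual optimistic-compilation pattern, sketched here with hypothetical types rather than the real V8 classes: record the assumption with the dependency tracker before folding on it, so that the optimized code is thrown away if the assumption is later invalidated.

#include <vector>

// Stand-in for CompilationDependencies; the integer ids merely model
// handles to maps or property cells.
struct Dependencies {
  std::vector<int> recorded;
  void AssumeMapStable(int map_id) { recorded.push_back(map_id); }
};

// Pattern used by the reducer: no recorded dependency, no folding.
bool TryFoldUnderStableMap(Dependencies* deps, bool map_is_stable, int map_id) {
  if (!map_is_stable) return false;  // would need an explicit runtime check
  deps->AssumeMapStable(map_id);     // register before relying on stability
  return true;                       // now safe to constant-fold
}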
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 9a3cbd7894..f3ceb2b0c0 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -12,11 +12,12 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/state-values-utils.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -210,8 +211,6 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateArguments(node);
case IrOpcode::kJSCreateArray:
return ReduceJSCreateArray(node);
- case IrOpcode::kJSCreateClosure:
- return ReduceJSCreateClosure(node);
case IrOpcode::kJSCreateIterResultObject:
return ReduceJSCreateIterResultObject(node);
case IrOpcode::kJSCreateKeyValueArray:
@@ -240,6 +239,7 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
Node* const new_target = NodeProperties::GetValueInput(node, 1);
Type* const new_target_type = NodeProperties::GetType(new_target);
Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
// Extract constructor and original constructor function.
if (target_type->IsHeapConstant() && new_target_type->IsHeapConstant() &&
new_target_type->AsHeapConstant()->Value()->IsJSFunction()) {
@@ -267,7 +267,7 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
// Emit code to allocate the JSObject instance for the
// {original_constructor}.
- AllocationBuilder a(jsgraph(), effect, graph()->start());
+ AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(instance_size);
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectProperties(),
@@ -278,6 +278,7 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
}
+ RelaxControls(node);
a.FinishAndChange(node);
return Changed(node);
}
@@ -750,45 +751,6 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
return ReduceNewArrayToStubCall(node, site);
}
-Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
- if (!FLAG_turbo_lower_create_closure) return NoChange();
- DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
- CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
- Handle<SharedFunctionInfo> shared = p.shared_info();
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
-
- int const function_map_index =
- Context::FunctionMapIndex(shared->language_mode(), shared->kind());
- Node* function_map = jsgraph()->HeapConstant(
- handle(Map::cast(native_context()->get(function_map_index)), isolate()));
-
- // Note that it is only safe to embed the raw entry point of the compile
- // lazy stub into the code, because that stub is immortal and immovable.
- Node* compile_entry = jsgraph()->PointerConstant(
- jsgraph()->isolate()->builtins()->CompileLazy()->entry());
- Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
- Node* empty_literals_array = jsgraph()->EmptyLiteralsArrayConstant();
- Node* the_hole = jsgraph()->TheHoleConstant();
- Node* undefined = jsgraph()->UndefinedConstant();
- AllocationBuilder a(jsgraph(), effect, control);
- STATIC_ASSERT(JSFunction::kSize == 9 * kPointerSize);
- a.Allocate(JSFunction::kSize, p.pretenure());
- a.Store(AccessBuilder::ForMap(), function_map);
- a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
- a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
- a.Store(AccessBuilder::ForJSFunctionLiterals(), empty_literals_array);
- a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(), the_hole);
- a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
- a.Store(AccessBuilder::ForJSFunctionContext(), context);
- a.Store(AccessBuilder::ForJSFunctionCodeEntry(), compile_entry);
- a.Store(AccessBuilder::ForJSFunctionNextFunctionLink(), undefined);
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
-}
-
Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
@@ -850,9 +812,10 @@ Reduction JSCreateLowering::ReduceJSCreateLiteral(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Handle<LiteralsArray> literals_array;
- if (GetSpecializationLiterals(node).ToHandle(&literals_array)) {
- Handle<Object> literal(literals_array->literal(p.index()), isolate());
+ Handle<FeedbackVector> feedback_vector;
+ if (GetSpecializationFeedbackVector(node).ToHandle(&feedback_vector)) {
+ FeedbackSlot slot(FeedbackVector::ToSlot(p.index()));
+ Handle<Object> literal(feedback_vector->Get(slot), isolate());
if (literal->IsAllocationSite()) {
Handle<AllocationSite> site = Handle<AllocationSite>::cast(literal);
Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()),
@@ -1343,13 +1306,13 @@ Node* JSCreateLowering::AllocateFastLiteralElements(
return builder.Finish();
}
-MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
+MaybeHandle<FeedbackVector> JSCreateLowering::GetSpecializationFeedbackVector(
Node* node) {
Node* const closure = NodeProperties::GetValueInput(node, 0);
switch (closure->opcode()) {
case IrOpcode::kHeapConstant: {
Handle<HeapObject> object = OpParameter<Handle<HeapObject>>(closure);
- return handle(Handle<JSFunction>::cast(object)->literals());
+ return handle(Handle<JSFunction>::cast(object)->feedback_vector());
}
case IrOpcode::kParameter: {
int const index = ParameterIndexOf(closure->op());
@@ -1357,14 +1320,14 @@ MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
// {Parameter} indices start at -1, so value outputs of {Start} look like
// this: closure, receiver, param0, ..., paramN, context.
if (index == -1) {
- return literals_array_;
+ return feedback_vector_;
}
break;
}
default:
break;
}
- return MaybeHandle<LiteralsArray>();
+ return MaybeHandle<FeedbackVector>();
}
Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
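A minimal sketch of the replacement lookup path (illustrative types only, not the V8 objects): the literal index carried by the operator is translated into a feedback slot, and the lowering then inspects whatever that slot holds for an AllocationSite boilerplate.

#include <cstddef>
#include <vector>

struct FeedbackVectorModel {
  std::vector<const void*> slots;
  // Stand-in for FeedbackVector::ToSlot; the identity mapping is an
  // assumption made purely for this sketch.
  static std::size_t ToSlot(int index) { return static_cast<std::size_t>(index); }
  const void* Get(std::size_t slot) const { return slots.at(slot); }
};

// Mirrors feedback_vector->Get(FeedbackVector::ToSlot(p.index())); the
// caller still has to test whether the result is an AllocationSite.
const void* LookupLiteral(const FeedbackVectorModel& vector, int index) {
  return vector.Get(FeedbackVectorModel::ToSlot(index));
}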
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index b5390f136c..eea75d3842 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -33,12 +33,13 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph, MaybeHandle<LiteralsArray> literals_array,
+ JSGraph* jsgraph,
+ MaybeHandle<FeedbackVector> feedback_vector,
Handle<Context> native_context, Zone* zone)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
- literals_array_(literals_array),
+ feedback_vector_(feedback_vector),
native_context_(native_context),
zone_(zone) {}
~JSCreateLowering() final {}
@@ -49,7 +50,6 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreate(Node* node);
Reduction ReduceJSCreateArguments(Node* node);
Reduction ReduceJSCreateArray(Node* node);
- Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateIterResultObject(Node* node);
Reduction ReduceJSCreateKeyValueArray(Node* node);
Reduction ReduceJSCreateLiteral(Node* node);
@@ -79,8 +79,8 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceNewArrayToStubCall(Node* node, Handle<AllocationSite> site);
- // Infers the LiteralsArray to use for a given {node}.
- MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
+ // Infers the FeedbackVector to use for a given {node}.
+ MaybeHandle<FeedbackVector> GetSpecializationFeedbackVector(Node* node);
Factory* factory() const;
Graph* graph() const;
@@ -96,7 +96,7 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
- MaybeHandle<LiteralsArray> const literals_array_;
+ MaybeHandle<FeedbackVector> const feedback_vector_;
Handle<Context> const native_context_;
Zone* const zone_;
};
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index ee844e9ee2..79a3377462 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -14,6 +14,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -88,11 +89,12 @@ void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
CallDescriptor::Flags flags,
- Operator::Properties properties) {
+ Operator::Properties properties,
+ int result_size) {
const CallInterfaceDescriptor& descriptor = callable.descriptor();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(), flags,
- properties);
+ properties, MachineType::AnyTagged(), result_size);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(zone(), 0, stub_code);
NodeProperties::ChangeOp(node, common()->Call(desc));
@@ -143,6 +145,15 @@ void JSGenericLowering::LowerJSToBoolean(Node* node) {
Operator::kEliminatable);
}
+void JSGenericLowering::LowerJSClassOf(Node* node) {
+ // The %_ClassOf intrinsic doesn't need the current context.
+ NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
+ Callable callable = CodeFactory::ClassOf(isolate());
+ node->AppendInput(zone(), graph()->start());
+ ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
+ Operator::kEliminatable);
+}
+
void JSGenericLowering::LowerJSTypeOf(Node* node) {
// The typeof operator doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
@@ -195,9 +206,8 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
PropertyAccess const& p = PropertyAccessOf(node->op());
- LanguageMode language_mode = p.language_mode();
Callable callable =
- CodeFactory::KeyedStoreICInOptimizedCode(isolate(), language_mode);
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate(), p.language_mode());
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
typedef StoreWithVectorDescriptor Descriptor;
node->InsertInputs(zone(), 0, 2);
@@ -230,6 +240,23 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
ReplaceWithStubCall(node, callable, flags);
}
+void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* value = NodeProperties::GetValueInput(node, 1);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
+ Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ typedef StoreWithVectorDescriptor Descriptor;
+ node->InsertInputs(zone(), 0, 3);
+ node->ReplaceInput(Descriptor::kReceiver, receiver);
+ node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
+ node->ReplaceInput(Descriptor::kValue, value);
+ node->ReplaceInput(Descriptor::kSlot,
+ jsgraph()->SmiConstant(p.feedback().index()));
+ node->ReplaceInput(Descriptor::kVector, vector);
+ ReplaceWithStubCall(node, callable, flags);
+}
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
@@ -481,9 +508,8 @@ void JSGenericLowering::LowerJSCreateScriptContext(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kNewScriptContext);
}
-
-void JSGenericLowering::LowerJSCallConstruct(Node* node) {
- CallConstructParameters const& p = CallConstructParametersOf(node->op());
+void JSGenericLowering::LowerJSConstruct(Node* node) {
+ ConstructParameters const& p = ConstructParametersOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::Construct(isolate());
@@ -501,15 +527,44 @@ void JSGenericLowering::LowerJSCallConstruct(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(desc));
}
-void JSGenericLowering::LowerJSCallConstructWithSpread(Node* node) {
- CallConstructWithSpreadParameters const& p =
- CallConstructWithSpreadParametersOf(node->op());
- ReplaceWithRuntimeCall(node, Runtime::kNewWithSpread,
- static_cast<int>(p.arity()));
+void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
+ ConstructWithSpreadParameters const& p =
+ ConstructWithSpreadParametersOf(node->op());
+ int const arg_count = static_cast<int>(p.arity() - 2);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::ConstructWithSpread(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* new_target = node->InputAt(arg_count + 1);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(arg_count + 1); // Drop new target.
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
+
+void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
+ CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
+ Callable callable = CodeFactory::CallForwardVarargs(isolate());
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* start_index = jsgraph()->Uint32Constant(p.start_index());
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, start_index);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
-void JSGenericLowering::LowerJSCallFunction(Node* node) {
- CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+void JSGenericLowering::LowerJSCall(Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
ConvertReceiverMode const mode = p.convert_mode();
Callable callable = CodeFactory::Call(isolate(), mode);
@@ -526,6 +581,19 @@ void JSGenericLowering::LowerJSCallFunction(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
+ CallWithSpreadParameters const& p = CallWithSpreadParametersOf(node->op());
+ int const arg_count = static_cast<int>(p.arity() - 2);
+ Callable callable = CodeFactory::CallWithSpread(isolate());
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
void JSGenericLowering::LowerJSCallRuntime(Node* node) {
const CallRuntimeParameters& p = CallRuntimeParametersOf(node->op());
@@ -537,12 +605,15 @@ void JSGenericLowering::LowerJSConvertReceiver(Node* node) {
}
void JSGenericLowering::LowerJSForInNext(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kForInNext);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::ForInNext(isolate());
+ ReplaceWithStubCall(node, callable, flags);
}
-
void JSGenericLowering::LowerJSForInPrepare(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kForInPrepare);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::ForInPrepare(isolate());
+ ReplaceWithStubCall(node, callable, flags, node->op()->properties(), 3);
}
void JSGenericLowering::LowerJSLoadMessage(Node* node) {
@@ -616,6 +687,11 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kStackGuard);
}
+void JSGenericLowering::LowerJSDebugger(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::HandleDebuggerStatement(isolate());
+ ReplaceWithStubCall(node, callable, flags);
+}
Zone* JSGenericLowering::zone() const { return graph()->zone(); }
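The motivation for the new result_size parameter can be shown with a toy descriptor builder (hypothetical types, not the Linkage API): most stub calls produce a single tagged result, but the ForInPrepare stub produces three, so its call descriptor has to declare three returns.

// Toy stand-in for Linkage::GetStubCallDescriptor's result_size handling.
struct CallDescriptorSpec {
  int parameter_count;
  int return_count;  // 1 for ordinary stubs, 3 for ForInPrepare
};

CallDescriptorSpec MakeStubDescriptor(int stack_parameters,
                                      int result_size = 1) {
  return CallDescriptorSpec{stack_parameters, result_size};
}

// Usage mirroring the lowering code:
//   MakeStubDescriptor(params);     // default single-result stub call
//   MakeStubDescriptor(params, 3);  // LowerJSForInPrepare passes 3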
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 38ee431f15..88d0b45156 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -38,7 +38,8 @@ class JSGenericLowering final : public Reducer {
// Helpers to replace existing nodes with a generic call.
void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags,
- Operator::Properties properties);
+ Operator::Properties properties,
+ int result_size = 1);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
Zone* zone() const;
diff --git a/deps/v8/src/compiler/js-global-object-specialization.cc b/deps/v8/src/compiler/js-global-object-specialization.cc
deleted file mode 100644
index 2fe5cabc22..0000000000
--- a/deps/v8/src/compiler/js-global-object-specialization.cc
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/js-global-object-specialization.h"
-
-#include "src/compilation-dependencies.h"
-#include "src/compiler/access-builder.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/compiler/type-cache.h"
-#include "src/lookup.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct JSGlobalObjectSpecialization::ScriptContextTableLookupResult {
- Handle<Context> context;
- bool immutable;
- int index;
-};
-
-JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
- Editor* editor, JSGraph* jsgraph, Handle<JSGlobalObject> global_object,
- CompilationDependencies* dependencies)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- global_object_(global_object),
- dependencies_(dependencies),
- type_cache_(TypeCache::Get()) {}
-
-Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kJSLoadGlobal:
- return ReduceJSLoadGlobal(node);
- case IrOpcode::kJSStoreGlobal:
- return ReduceJSStoreGlobal(node);
- default:
- break;
- }
- return NoChange();
-}
-
-namespace {
-
-FieldAccess ForPropertyCellValue(MachineRepresentation representation,
- Type* type, MaybeHandle<Map> map,
- Handle<Name> name) {
- WriteBarrierKind kind = kFullWriteBarrier;
- if (representation == MachineRepresentation::kTaggedSigned) {
- kind = kNoWriteBarrier;
- } else if (representation == MachineRepresentation::kTaggedPointer) {
- kind = kPointerWriteBarrier;
- }
- MachineType r = MachineType::TypeForRepresentation(representation);
- FieldAccess access = {
- kTaggedBase, PropertyCell::kValueOffset, name, map, type, r, kind};
- return access;
-}
-} // namespace
-
-Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
- Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Try to lookup the name on the script context table first (lexical scoping).
- ScriptContextTableLookupResult result;
- if (LookupInScriptContextTable(name, &result)) {
- if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
- Node* context = jsgraph()->HeapConstant(result.context);
- Node* value = effect = graph()->NewNode(
- javascript()->LoadContext(0, result.index, result.immutable), context,
- effect);
- ReplaceWithValue(node, value, effect);
- return Replace(value);
- }
-
- // Lookup on the global object instead. We only deal with own data
- // properties of the global object here (represented as PropertyCell).
- LookupIterator it(global_object(), name, LookupIterator::OWN);
- if (it.state() != LookupIterator::DATA) return NoChange();
- if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
- Handle<PropertyCell> property_cell = it.GetPropertyCell();
- PropertyDetails property_details = property_cell->property_details();
- Handle<Object> property_cell_value(property_cell->value(), isolate());
-
- // Load from non-configurable, read-only data property on the global
- // object can be constant-folded, even without deoptimization support.
- if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
- Node* value = jsgraph()->Constant(property_cell_value);
- ReplaceWithValue(node, value);
- return Replace(value);
- }
-
- // Record a code dependency on the cell if we can benefit from the
- // additional feedback, or the global property is configurable (i.e.
- // can be deleted or reconfigured to an accessor property).
- if (property_details.cell_type() != PropertyCellType::kMutable ||
- property_details.IsConfigurable()) {
- dependencies()->AssumePropertyCell(property_cell);
- }
-
- // Load from constant/undefined global property can be constant-folded.
- if (property_details.cell_type() == PropertyCellType::kConstant ||
- property_details.cell_type() == PropertyCellType::kUndefined) {
- Node* value = jsgraph()->Constant(property_cell_value);
- ReplaceWithValue(node, value);
- return Replace(value);
- }
-
- // Load from constant type cell can benefit from type feedback.
- MaybeHandle<Map> map;
- Type* property_cell_value_type = Type::NonInternal();
- MachineRepresentation representation = MachineRepresentation::kTagged;
- if (property_details.cell_type() == PropertyCellType::kConstantType) {
- // Compute proper type based on the current value in the cell.
- if (property_cell_value->IsSmi()) {
- property_cell_value_type = Type::SignedSmall();
- representation = MachineRepresentation::kTaggedSigned;
- } else if (property_cell_value->IsNumber()) {
- property_cell_value_type = Type::Number();
- representation = MachineRepresentation::kTaggedPointer;
- } else {
- Handle<Map> property_cell_value_map(
- Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
- property_cell_value_type = Type::For(property_cell_value_map);
- representation = MachineRepresentation::kTaggedPointer;
-
- // We can only use the property cell value map for map check elimination
- // if it's stable, i.e. the HeapObject wasn't mutated without the cell
- // state being updated.
- if (property_cell_value_map->is_stable()) {
- dependencies()->AssumeMapStable(property_cell_value_map);
- map = property_cell_value_map;
- }
- }
- }
- Node* value = effect = graph()->NewNode(
- simplified()->LoadField(ForPropertyCellValue(
- representation, property_cell_value_type, map, name)),
- jsgraph()->HeapConstant(property_cell), effect, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-
-Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
- DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
- Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
- Node* value = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Try to lookup the name on the script context table first (lexical scoping).
- ScriptContextTableLookupResult result;
- if (LookupInScriptContextTable(name, &result)) {
- if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
- if (result.immutable) return NoChange();
- Node* context = jsgraph()->HeapConstant(result.context);
- effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
- value, context, effect, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
-
- // Lookup on the global object instead. We only deal with own data
- // properties of the global object here (represented as PropertyCell).
- LookupIterator it(global_object(), name, LookupIterator::OWN);
- if (it.state() != LookupIterator::DATA) return NoChange();
- if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
- Handle<PropertyCell> property_cell = it.GetPropertyCell();
- PropertyDetails property_details = property_cell->property_details();
- Handle<Object> property_cell_value(property_cell->value(), isolate());
-
- // Don't even bother trying to lower stores to read-only data properties.
- if (property_details.IsReadOnly()) return NoChange();
- switch (property_details.cell_type()) {
- case PropertyCellType::kUndefined: {
- return NoChange();
- }
- case PropertyCellType::kConstant: {
- // Record a code dependency on the cell, and just deoptimize if the new
- // value doesn't match the previous value stored inside the cell.
- dependencies()->AssumePropertyCell(property_cell);
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
- jsgraph()->Constant(property_cell_value));
- effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
- break;
- }
- case PropertyCellType::kConstantType: {
- // Record a code dependency on the cell, and just deoptimize if the new
- // values' type doesn't match the type of the previous value in the cell.
- dependencies()->AssumePropertyCell(property_cell);
- Type* property_cell_value_type;
- MachineRepresentation representation = MachineRepresentation::kTagged;
- if (property_cell_value->IsHeapObject()) {
- // We cannot do anything if the {property_cell_value}s map is no
- // longer stable.
- Handle<Map> property_cell_value_map(
- Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
- if (!property_cell_value_map->is_stable()) return NoChange();
- dependencies()->AssumeMapStable(property_cell_value_map);
-
- // Check that the {value} is a HeapObject.
- value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
- value, effect, control);
-
- // Check {value} map against the {property_cell} map.
- effect =
- graph()->NewNode(simplified()->CheckMaps(
- CheckMapsFlag::kNone,
- ZoneHandleSet<Map>(property_cell_value_map)),
- value, effect, control);
- property_cell_value_type = Type::OtherInternal();
- representation = MachineRepresentation::kTaggedPointer;
- } else {
- // Check that the {value} is a Smi.
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
- property_cell_value_type = Type::SignedSmall();
- representation = MachineRepresentation::kTaggedSigned;
- }
- effect = graph()->NewNode(simplified()->StoreField(ForPropertyCellValue(
- representation, property_cell_value_type,
- MaybeHandle<Map>(), name)),
- jsgraph()->HeapConstant(property_cell), value,
- effect, control);
- break;
- }
- case PropertyCellType::kMutable: {
- // Record a code dependency on the cell, and just deoptimize if the
- // property ever becomes read-only.
- dependencies()->AssumePropertyCell(property_cell);
- effect = graph()->NewNode(
- simplified()->StoreField(ForPropertyCellValue(
- MachineRepresentation::kTagged, Type::NonInternal(),
- MaybeHandle<Map>(), name)),
- jsgraph()->HeapConstant(property_cell), value, effect, control);
- break;
- }
- }
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
- Handle<Name> name, ScriptContextTableLookupResult* result) {
- if (!name->IsString()) return false;
- Handle<ScriptContextTable> script_context_table(
- global_object()->native_context()->script_context_table(), isolate());
- ScriptContextTable::LookupResult lookup_result;
- if (!ScriptContextTable::Lookup(script_context_table,
- Handle<String>::cast(name), &lookup_result)) {
- return false;
- }
- Handle<Context> script_context = ScriptContextTable::GetContext(
- script_context_table, lookup_result.context_index);
- result->context = script_context;
- result->immutable = lookup_result.mode == CONST;
- result->index = lookup_result.slot_index;
- return true;
-}
-
-Graph* JSGlobalObjectSpecialization::graph() const {
- return jsgraph()->graph();
-}
-
-Isolate* JSGlobalObjectSpecialization::isolate() const {
- return jsgraph()->isolate();
-}
-
-CommonOperatorBuilder* JSGlobalObjectSpecialization::common() const {
- return jsgraph()->common();
-}
-
-JSOperatorBuilder* JSGlobalObjectSpecialization::javascript() const {
- return jsgraph()->javascript();
-}
-
-SimplifiedOperatorBuilder* JSGlobalObjectSpecialization::simplified() const {
- return jsgraph()->simplified();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/js-global-object-specialization.h b/deps/v8/src/compiler/js-global-object-specialization.h
deleted file mode 100644
index 50bdd80a88..0000000000
--- a/deps/v8/src/compiler/js-global-object-specialization.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
-#define V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class CompilationDependencies;
-
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-class JSGraph;
-class JSOperatorBuilder;
-class SimplifiedOperatorBuilder;
-class TypeCache;
-
-// Specializes a given JSGraph to a given global object, potentially constant
-// folding some {JSLoadGlobal} nodes or strength reducing some {JSStoreGlobal}
-// nodes.
-class JSGlobalObjectSpecialization final : public AdvancedReducer {
- public:
- JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph,
- Handle<JSGlobalObject> global_object,
- CompilationDependencies* dependencies);
-
- Reduction Reduce(Node* node) final;
-
- private:
- Reduction ReduceJSLoadGlobal(Node* node);
- Reduction ReduceJSStoreGlobal(Node* node);
-
- struct ScriptContextTableLookupResult;
- bool LookupInScriptContextTable(Handle<Name> name,
- ScriptContextTableLookupResult* result);
-
- Graph* graph() const;
- JSGraph* jsgraph() const { return jsgraph_; }
- Isolate* isolate() const;
- CommonOperatorBuilder* common() const;
- JSOperatorBuilder* javascript() const;
- SimplifiedOperatorBuilder* simplified() const;
- Handle<JSGlobalObject> global_object() const { return global_object_; }
- CompilationDependencies* dependencies() const { return dependencies_; }
-
- JSGraph* const jsgraph_;
- Handle<JSGlobalObject> const global_object_;
- CompilationDependencies* const dependencies_;
- TypeCache const& type_cache_;
-
- DISALLOW_COPY_AND_ASSIGN(JSGlobalObjectSpecialization);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 1fa7861d49..b51623aca2 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/code-stubs.h"
#include "src/compiler/js-graph.h"
+
+#include "src/code-stubs.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/typer.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -66,11 +68,6 @@ Node* JSGraph::EmptyFixedArrayConstant() {
HeapConstant(factory()->empty_fixed_array()));
}
-Node* JSGraph::EmptyLiteralsArrayConstant() {
- return CACHED(kEmptyLiteralsArrayConstant,
- HeapConstant(factory()->empty_literals_array()));
-}
-
Node* JSGraph::EmptyStringConstant() {
return CACHED(kEmptyStringConstant, HeapConstant(factory()->empty_string()));
}
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index e10591998c..8f81555cb2 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -49,7 +49,6 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
ArgvMode argv_mode = kArgvOnStack,
bool builtin_exit_frame = false);
Node* EmptyFixedArrayConstant();
- Node* EmptyLiteralsArrayConstant();
Node* EmptyStringConstant();
Node* FixedArrayMapConstant();
Node* FixedDoubleArrayMapConstant();
@@ -167,7 +166,6 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kCEntryStub3Constant,
kCEntryStub1WithBuiltinExitFrameConstant,
kEmptyFixedArrayConstant,
- kEmptyLiteralsArrayConstant,
kEmptyStringConstant,
kFixedArrayMapConstant,
kFixedDoubleArrayMapConstant,
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 672d322a24..6f99fbb183 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -22,7 +22,7 @@ namespace compiler {
namespace {
int CollectFunctions(Node* node, Handle<JSFunction>* functions,
- int functions_size) {
+ int functions_size, Handle<SharedFunctionInfo>& shared) {
DCHECK_NE(0, functions_size);
HeapObjectMatcher m(node);
if (m.HasValue() && m.Value()->IsJSFunction()) {
@@ -39,23 +39,29 @@ int CollectFunctions(Node* node, Handle<JSFunction>* functions,
}
return value_input_count;
}
+ if (m.IsJSCreateClosure()) {
+ CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
+ functions[0] = Handle<JSFunction>::null();
+ shared = p.shared_info();
+ return 1;
+ }
return 0;
}
-bool CanInlineFunction(Handle<JSFunction> function) {
+bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
// Built-in functions are handled by the JSBuiltinReducer.
- if (function->shared()->HasBuiltinFunctionId()) return false;
+ if (shared->HasBuiltinFunctionId()) return false;
// Only choose user code for inlining.
- if (!function->shared()->IsUserJavaScript()) return false;
+ if (!shared->IsUserJavaScript()) return false;
// Quick check on the size of the AST to avoid parsing large candidate.
- if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
+ if (shared->ast_node_count() > FLAG_max_inlined_nodes) {
return false;
}
// Avoid inlining across the boundary of asm.js code.
- if (function->shared()->asm_function()) return false;
+ if (shared->asm_function()) return false;
return true;
}
@@ -72,8 +78,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
Node* callee = node->InputAt(0);
Candidate candidate;
candidate.node = node;
- candidate.num_functions =
- CollectFunctions(callee, candidate.functions, kMaxCallPolymorphism);
+ candidate.num_functions = CollectFunctions(
+ callee, candidate.functions, kMaxCallPolymorphism, candidate.shared_info);
if (candidate.num_functions == 0) {
return NoChange();
} else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) {
@@ -87,11 +93,14 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// Functions marked with %SetForceInlineFlag are immediately inlined.
bool can_inline = false, force_inline = true;
for (int i = 0; i < candidate.num_functions; ++i) {
- Handle<JSFunction> function = candidate.functions[i];
- if (!function->shared()->force_inline()) {
+ Handle<SharedFunctionInfo> shared =
+ candidate.functions[i].is_null()
+ ? candidate.shared_info
+ : handle(candidate.functions[i]->shared());
+ if (!shared->force_inline()) {
force_inline = false;
}
- if (CanInlineFunction(function)) {
+ if (CanInlineFunction(shared)) {
can_inline = true;
}
}
@@ -117,11 +126,11 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
}
// Gather feedback on how often this call site has been hit before.
- if (node->opcode() == IrOpcode::kJSCallFunction) {
- CallFunctionParameters const p = CallFunctionParametersOf(node->op());
+ if (node->opcode() == IrOpcode::kJSCall) {
+ CallParameters const p = CallParametersOf(node->op());
candidate.frequency = p.frequency();
} else {
- CallConstructParameters const p = CallConstructParametersOf(node->op());
+ ConstructParameters const p = ConstructParametersOf(node->op());
candidate.frequency = p.frequency();
}
@@ -167,15 +176,18 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
int const num_calls = candidate.num_functions;
Node* const node = candidate.node;
if (num_calls == 1) {
- Handle<JSFunction> function = candidate.functions[0];
- Reduction const reduction = inliner_.ReduceJSCall(node, function);
+ Handle<SharedFunctionInfo> shared =
+ candidate.functions[0].is_null()
+ ? candidate.shared_info
+ : handle(candidate.functions[0]->shared());
+ Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
- cumulative_count_ += function->shared()->ast_node_count();
+ cumulative_count_ += shared->ast_node_count();
}
return reduction;
}
- // Expand the JSCallFunction/JSCallConstruct node to a subgraph first if
+ // Expand the JSCall/JSConstruct node to a subgraph first if
// we have multiple known target functions.
DCHECK_LT(1, num_calls);
Node* calls[kMaxCallPolymorphism + 1];
@@ -192,6 +204,8 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
// Create the appropriate control flow to dispatch to the cloned calls.
for (int i = 0; i < num_calls; ++i) {
+ // TODO(2206): Base the comparison on the underlying SharedFunctionInfo
+ // instead of the target JSFunction reference directly.
Node* target = jsgraph()->HeapConstant(candidate.functions[i]);
if (i != (num_calls - 1)) {
Node* check =
@@ -255,7 +269,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
for (int i = 0; i < num_calls; ++i) {
Handle<JSFunction> function = candidate.functions[i];
Node* node = calls[i];
- Reduction const reduction = inliner_.ReduceJSCall(node, function);
+ Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
cumulative_count_ += function->shared()->ast_node_count();
}
@@ -281,9 +295,12 @@ void JSInliningHeuristic::PrintCandidates() {
PrintF(" #%d:%s, frequency:%g\n", candidate.node->id(),
candidate.node->op()->mnemonic(), candidate.frequency);
for (int i = 0; i < candidate.num_functions; ++i) {
- Handle<JSFunction> function = candidate.functions[i];
- PrintF(" - size:%d, name: %s\n", function->shared()->ast_node_count(),
- function->shared()->DebugName()->ToCString().get());
+ Handle<SharedFunctionInfo> shared =
+ candidate.functions[i].is_null()
+ ? candidate.shared_info
+ : handle(candidate.functions[i]->shared());
+ PrintF(" - size:%d, name: %s\n", shared->ast_node_count(),
+ shared->DebugName()->ToCString().get());
}
}
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index aca801103a..b834cb0a06 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -37,6 +37,11 @@ class JSInliningHeuristic final : public AdvancedReducer {
struct Candidate {
Handle<JSFunction> functions[kMaxCallPolymorphism];
+ // TODO(2206): For now polymorphic inlining is treated orthogonally to
+ // inlining based on SharedFunctionInfo. This should be unified and the
+ // above array should be switched to SharedFunctionInfo instead. Currently
+ // we use {num_functions == 1 && functions[0].is_null()} as an indicator.
+ Handle<SharedFunctionInfo> shared_info;
int num_functions;
Node* node = nullptr; // The call site at which to inline.
float frequency = 0.0f; // Relative frequency of this call site.
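
The TODO above leans on a sentinel: a single null entry in {functions} means the target is known only through its SharedFunctionInfo. A minimal standalone C++ sketch of that convention (not V8 code; all types here are illustrative stand-ins):

    #include <cassert>

    struct SharedFunctionInfo {};
    struct JSFunction { SharedFunctionInfo* shared; };

    struct Candidate {
      JSFunction* functions[4] = {nullptr};  // slot 0 null => shared-info-only target
      SharedFunctionInfo* shared_info = nullptr;
      int num_functions = 0;
    };

    // Mirrors the {num_functions == 1 && functions[0].is_null()} indicator.
    SharedFunctionInfo* TargetShared(const Candidate& c, int i) {
      return c.functions[i] == nullptr ? c.shared_info : c.functions[i]->shared;
    }

    int main() {
      SharedFunctionInfo s;
      Candidate c;
      c.num_functions = 1;
      c.shared_info = &s;  // target came from a JSCreateClosure node
      assert(TargetShared(c, 0) == &s);
    }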
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 1717d4118a..c87be6c236 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -31,45 +31,45 @@ namespace compiler {
// Provides convenience accessors for the common layout of nodes having either
-// the {JSCallFunction} or the {JSCallConstruct} operator.
+// the {JSCall} or the {JSConstruct} operator.
class JSCallAccessor {
public:
explicit JSCallAccessor(Node* call) : call_(call) {
- DCHECK(call->opcode() == IrOpcode::kJSCallFunction ||
- call->opcode() == IrOpcode::kJSCallConstruct);
+ DCHECK(call->opcode() == IrOpcode::kJSCall ||
+ call->opcode() == IrOpcode::kJSConstruct);
}
Node* target() {
- // Both, {JSCallFunction} and {JSCallConstruct}, have same layout here.
+ // Both {JSCall} and {JSConstruct} have the same layout here.
return call_->InputAt(0);
}
Node* receiver() {
- DCHECK_EQ(IrOpcode::kJSCallFunction, call_->opcode());
+ DCHECK_EQ(IrOpcode::kJSCall, call_->opcode());
return call_->InputAt(1);
}
Node* new_target() {
- DCHECK_EQ(IrOpcode::kJSCallConstruct, call_->opcode());
+ DCHECK_EQ(IrOpcode::kJSConstruct, call_->opcode());
return call_->InputAt(formal_arguments() + 1);
}
Node* frame_state() {
- // Both, {JSCallFunction} and {JSCallConstruct}, have frame state.
+ // Both {JSCall} and {JSConstruct} have a frame state.
return NodeProperties::GetFrameStateInput(call_);
}
int formal_arguments() {
- // Both, {JSCallFunction} and {JSCallConstruct}, have two extra inputs:
- // - JSCallConstruct: Includes target function and new target.
- // - JSCallFunction: Includes target function and receiver.
+ // Both {JSCall} and {JSConstruct} have two extra inputs:
+ // - JSConstruct: Includes target function and new target.
+ // - JSCall: Includes target function and receiver.
return call_->op()->ValueInputCount() - 2;
}
float frequency() const {
- return (call_->opcode() == IrOpcode::kJSCallFunction)
- ? CallFunctionParametersOf(call_->op()).frequency()
- : CallConstructParametersOf(call_->op()).frequency();
+ return (call_->opcode() == IrOpcode::kJSCall)
+ ? CallParametersOf(call_->op()).frequency()
+ : ConstructParametersOf(call_->op()).frequency();
}
private:
@@ -220,9 +220,9 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
}
}
-
Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count,
+ BailoutId bailout_id,
FrameStateType frame_state_type,
Handle<SharedFunctionInfo> shared) {
const FrameStateFunctionInfo* state_info =
@@ -230,7 +230,7 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
parameter_count + 1, 0, shared);
const Operator* op = common()->FrameState(
- BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
+ bailout_id, OutputFrameStateCombine::Ignore(), state_info);
const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
Node* node0 = graph()->NewNode(op0);
NodeVector params(local_zone_);
@@ -278,19 +278,6 @@ Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
namespace {
-// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
-// alias analyzer?
-bool IsSame(Node* a, Node* b) {
- if (a == b) {
- return true;
- } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
- return IsSame(a->InputAt(0), b);
- } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
- return IsSame(a, b->InputAt(0));
- }
- return false;
-}
-
// TODO(bmeurer): Unify this with the witness helper functions in the
// js-builtin-reducer.cc once we have a better understanding of the
// map tracking we want to do, and eventually changed the CheckMaps
@@ -303,40 +290,39 @@ bool IsSame(Node* a, Node* b) {
// function, which either returns the map set from the CheckMaps or
// a singleton set from a StoreField.
bool NeedsConvertReceiver(Node* receiver, Node* effect) {
- for (Node* dominator = effect;;) {
- if (dominator->opcode() == IrOpcode::kCheckMaps &&
- IsSame(dominator->InputAt(0), receiver)) {
- // Check if all maps have the given {instance_type}.
- ZoneHandleSet<Map> const& maps =
- CheckMapsParametersOf(dominator->op()).maps();
- for (size_t i = 0; i < maps.size(); ++i) {
- if (!maps[i]->IsJSReceiverMap()) return true;
- }
+ // Check if the {receiver} is already a JSReceiver.
+ switch (receiver->opcode()) {
+ case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructWithSpread:
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateKeyValueArray:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSGetSuperConstructor:
+ case IrOpcode::kJSToObject: {
return false;
}
- switch (dominator->opcode()) {
- case IrOpcode::kStoreField: {
- FieldAccess const& access = FieldAccessOf(dominator->op());
- if (access.base_is_tagged == kTaggedBase &&
- access.offset == HeapObject::kMapOffset) {
- return true;
+ default: {
+ // We don't really care about the exact maps here, just the instance
+ // types, which don't change across potential side-effecting operations.
+ ZoneHandleSet<Map> maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+ if (result != NodeProperties::kNoReceiverMaps) {
+ // Check if all {maps} are actually JSReceiver maps.
+ for (size_t i = 0; i < maps.size(); ++i) {
+ if (!maps[i]->IsJSReceiverMap()) return true;
}
- break;
- }
- case IrOpcode::kStoreElement:
- case IrOpcode::kStoreTypedElement:
- break;
- default: {
- DCHECK_EQ(1, dominator->op()->EffectOutputCount());
- if (dominator->op()->EffectInputCount() != 1 ||
- !dominator->op()->HasProperty(Operator::kNoWrite)) {
- // Didn't find any appropriate CheckMaps node.
- return true;
- }
- break;
+ return false;
}
+ return true;
}
- dominator = NodeProperties::GetEffectInput(dominator);
}
}
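
The rewritten NeedsConvertReceiver replaces the old effect-chain walk with two cheaper checks: the receiver's own opcode (creation nodes always yield a JSReceiver) and, failing that, the inferred receiver maps. A standalone sketch of the same decision structure (not V8 code; opcodes and maps are reduced to plain enums and flags):

    #include <vector>

    enum class Opcode { kJSCreate, kJSToObject, kJSConvertReceiver, kOther };
    struct Map { bool is_js_receiver_map; };

    // Returns true when a JSConvertReceiver node is still required.
    bool NeedsConvertReceiverModel(Opcode receiver_op,
                                   const std::vector<Map>* inferred_maps) {
      switch (receiver_op) {
        case Opcode::kJSCreate:
        case Opcode::kJSToObject:
        case Opcode::kJSConvertReceiver:
          return false;  // these opcodes always produce a JSReceiver
        default:
          if (inferred_maps != nullptr) {  // maps could be inferred
            for (const Map& m : *inferred_maps)
              if (!m.is_js_receiver_map) return true;
            return false;  // every possible map is a JSReceiver map
          }
          return true;  // nothing known about the receiver: convert conservatively
      }
    }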
@@ -360,29 +346,120 @@ bool IsNonConstructible(Handle<SharedFunctionInfo> shared_info) {
} // namespace
-
-Reduction JSInliner::Reduce(Node* node) {
- if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+// Determines whether the call target of the given call {node} is statically
+// known and can be used as an inlining candidate. The {SharedFunctionInfo} of
+// the call target is provided (the exact closure might be unknown).
+bool JSInliner::DetermineCallTarget(
+ Node* node, Handle<SharedFunctionInfo>& shared_info_out) {
+ DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+ HeapObjectMatcher match(node->InputAt(0));
// This reducer can handle both normal function calls as well a constructor
// calls whenever the target is a constant function object, as follows:
- // - JSCallFunction(target:constant, receiver, args...)
- // - JSCallConstruct(target:constant, args..., new.target)
+ // - JSCall(target:constant, receiver, args...)
+ // - JSConstruct(target:constant, args..., new.target)
+ if (match.HasValue() && match.Value()->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+
+ // Disallow cross native-context inlining for now. This means that all parts
+ // of the resulting code will operate on the same global object. This also
+ // prevents cross context leaks, where we could inline functions from a
+ // different context and hold on to that context (and closure) from the code
+ // object.
+ // TODO(turbofan): We might want to revisit this restriction later when we
+ // have a need for this, and we know how to model different native contexts
+ // in the same graph in a compositional way.
+ if (function->context()->native_context() !=
+ info_->context()->native_context()) {
+ return false;
+ }
+
+ shared_info_out = handle(function->shared());
+ return true;
+ }
+
+ // This reducer can also handle calls where the target is statically known to
+ // be the result of a closure instantiation operation, as follows:
+ // - JSCall(JSCreateClosure[shared](context), receiver, args...)
+ // - JSConstruct(JSCreateClosure[shared](context), args..., new.target)
+ if (match.IsJSCreateClosure()) {
+ CreateClosureParameters const& p = CreateClosureParametersOf(match.op());
+
+ // Disallow inlining in case the instantiation site was never run and hence
+ // the vector cell does not contain a valid feedback vector for the call
+ // target.
+ // TODO(turbofan): We might consider eagerly creating the feedback vector
+ // in such a case (in {DetermineCallContext} below) eventually.
+ FeedbackSlot slot = p.feedback().slot();
+ Handle<Cell> cell(Cell::cast(p.feedback().vector()->Get(slot)));
+ if (!cell->value()->IsFeedbackVector()) return false;
+
+ shared_info_out = p.shared_info();
+ return true;
+ }
+
+ return false;
+}
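
DetermineCallTarget accepts exactly two callee shapes: a constant JSFunction in the same native context, or a JSCreateClosure node whose feedback cell is already populated. A hedged standalone model of that dispatch (illustrative stand-in types, not the V8 API):

    struct SharedFunctionInfo {};
    struct JSFunction { SharedFunctionInfo* shared; bool same_native_context; };
    struct ClosureSite { SharedFunctionInfo* shared; bool feedback_populated; };

    // Returns nullptr when the target is unknown or inlining must bail out.
    SharedFunctionInfo* DetermineTarget(const JSFunction* fn, const ClosureSite* site) {
      if (fn != nullptr) {                             // JSCall(target:constant, ...)
        if (!fn->same_native_context) return nullptr;  // no cross-context inlining
        return fn->shared;
      }
      if (site != nullptr) {                            // JSCall(JSCreateClosure[shared], ...)
        if (!site->feedback_populated) return nullptr;  // site never ran: no vector
        return site->shared;
      }
      return nullptr;
    }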
+
+// Determines statically known information about the call target (assuming that
+// the call target is known according to {DetermineCallTarget} above). The
+// following static information is provided:
+// - context : The context (as SSA value) bound by the call target.
+// - feedback_vector : The target is guaranteed to use this feedback vector.
+void JSInliner::DetermineCallContext(
+ Node* node, Node*& context_out,
+ Handle<FeedbackVector>& feedback_vector_out) {
+ DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
HeapObjectMatcher match(node->InputAt(0));
- if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
- Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
- return ReduceJSCall(node, function);
+ if (match.HasValue() && match.Value()->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+
+ // If the target function was never invoked, its literals array might not
+ // contain a feedback vector. We ensure at this point that it is created.
+ JSFunction::EnsureLiterals(function);
+
+ // The inlinee specializes to the context from the JSFunction object.
+ context_out = jsgraph()->Constant(handle(function->context()));
+ feedback_vector_out = handle(function->feedback_vector());
+ return;
+ }
+
+ if (match.IsJSCreateClosure()) {
+ CreateClosureParameters const& p = CreateClosureParametersOf(match.op());
+
+ // Load the feedback vector of the target by looking up its vector cell at
+ // the instantiation site (we only decide to inline if it's populated).
+ FeedbackSlot slot = p.feedback().slot();
+ Handle<Cell> cell(Cell::cast(p.feedback().vector()->Get(slot)));
+ DCHECK(cell->value()->IsFeedbackVector());
+
+ // The inlinee uses the locally provided context at instantiation.
+ context_out = NodeProperties::GetContextInput(match.node());
+ feedback_vector_out = handle(FeedbackVector::cast(cell->value()));
+ return;
+ }
+
+ // Must succeed.
+ UNREACHABLE();
+}
+
+Reduction JSInliner::Reduce(Node* node) {
+ if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+ return ReduceJSCall(node);
}
-Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
+Reduction JSInliner::ReduceJSCall(Node* node) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+ Handle<SharedFunctionInfo> shared_info;
JSCallAccessor call(node);
- Handle<SharedFunctionInfo> shared_info(function->shared());
+
+ // Determine the call target.
+ if (!DetermineCallTarget(node, shared_info)) return NoChange();
// Inlining is only supported in the bytecode pipeline.
if (!info_->is_optimizing_from_bytecode()) {
- TRACE("Inlining %s into %s is not supported in the deprecated pipeline\n",
+ TRACE("Not inlining %s into %s due to use of the deprecated pipeline\n",
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
@@ -397,7 +474,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
}
// Constructor must be constructable.
- if (node->opcode() == IrOpcode::kJSCallConstruct &&
+ if (node->opcode() == IrOpcode::kJSConstruct &&
IsNonConstructible(shared_info)) {
TRACE("Not inlining %s into %s because constructor is not constructable.\n",
shared_info->DebugName()->ToCString().get(),
@@ -405,9 +482,21 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
return NoChange();
}
+ // TODO(706642): Don't inline derived class constructors for now, as the
+ // inlining logic doesn't deal properly with derived class constructors
+ // that return a primitive, i.e. it's not in sync with what the Parser
+ // and the JSConstructSub do.
+ if (node->opcode() == IrOpcode::kJSConstruct &&
+ IsDerivedConstructor(shared_info->kind())) {
+ TRACE("Not inlining %s into %s because constructor is derived.\n",
+ shared_info->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
// Class constructors are callable, but [[Call]] will raise an exception.
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
- if (node->opcode() == IrOpcode::kJSCallFunction &&
+ if (node->opcode() == IrOpcode::kJSCall &&
IsClassConstructor(shared_info->kind())) {
TRACE("Not inlining %s into %s because callee is a class constructor.\n",
shared_info->DebugName()->ToCString().get(),
@@ -423,22 +512,6 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
return NoChange();
}
- // Disallow cross native-context inlining for now. This means that all parts
- // of the resulting code will operate on the same global object.
- // This also prevents cross context leaks for asm.js code, where we could
- // inline functions from a different context and hold on to that context (and
- // closure) from the code object.
- // TODO(turbofan): We might want to revisit this restriction later when we
- // have a need for this, and we know how to model different native contexts
- // in the same graph in a compositional way.
- if (function->context()->native_context() !=
- info_->context()->native_context()) {
- TRACE("Not inlining %s into %s because of different native contexts\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
- }
-
// TODO(turbofan): TranslatedState::GetAdaptedArguments() currently relies on
// not inlining recursive functions. We might want to relax that at some
// point.
@@ -487,9 +560,9 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
}
}
- Zone zone(info_->isolate()->allocator(), ZONE_NAME);
- ParseInfo parse_info(&zone, shared_info);
- CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+ ParseInfo parse_info(shared_info);
+ CompilationInfo info(parse_info.zone(), &parse_info,
+ Handle<JSFunction>::null());
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
info.MarkAsOptimizeFromBytecode();
@@ -517,8 +590,10 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
- // If function was lazily compiled, its literals array may not yet be set up.
- JSFunction::EnsureLiterals(function);
+ // Determine the target's feedback vector and its context.
+ Node* context;
+ Handle<FeedbackVector> feedback_vector;
+ DetermineCallContext(node, context, feedback_vector);
// Create the subgraph for the inlinee.
Node* start;
@@ -527,9 +602,8 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// Run the BytecodeGraphBuilder to create the subgraph.
Graph::SubgraphScope scope(graph());
BytecodeGraphBuilder graph_builder(
- &zone, shared_info, handle(function->feedback_vector()),
- BailoutId::None(), jsgraph(), call.frequency(), source_positions_,
- inlining_id);
+ parse_info.zone(), shared_info, feedback_vector, BailoutId::None(),
+ jsgraph(), call.frequency(), source_positions_, inlining_id);
graph_builder.CreateGraph(false);
// Extract the inlinee start/end nodes.
@@ -563,20 +637,38 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
Node* frame_state = call.frame_state();
Node* new_target = jsgraph()->UndefinedConstant();
- // Inline {JSCallConstruct} requires some additional magic.
- if (node->opcode() == IrOpcode::kJSCallConstruct) {
+ // Inline {JSConstruct} requires some additional magic.
+ if (node->opcode() == IrOpcode::kJSConstruct) {
+ // Swizzle the inputs of the {JSConstruct} node to look like inputs to a
+ // normal {JSCall} node so that the rest of the inlining machinery
+ // behaves as if we were dealing with a regular function invocation.
+ new_target = call.new_target(); // Retrieve new target value input.
+ node->RemoveInput(call.formal_arguments() + 1); // Drop new target.
+ node->InsertInput(graph()->zone(), 1, new_target);
+
// Insert nodes around the call that model the behavior required for a
// constructor dispatch (allocate implicit receiver and check return value).
// This models the behavior usually accomplished by our {JSConstructStub}.
// Note that the context has to be the callers context (input to call node).
+ // Also note that by splitting off the {JSCreate} piece of the constructor
+ // call, we create an observable deoptimization point after the receiver
+ // instantiation but before the invocation (i.e. inside {JSConstructStub}
+ // where execution continues at {construct_stub_create_deopt_pc_offset}).
Node* receiver = jsgraph()->TheHoleConstant(); // Implicit receiver.
if (NeedsImplicitReceiver(shared_info)) {
- Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- Node* create = graph()->NewNode(javascript()->Create(), call.target(),
- call.new_target(), context,
- frame_state_before, effect);
+ Node* frame_state_inside = CreateArtificialFrameState(
+ node, frame_state, call.formal_arguments(),
+ BailoutId::ConstructStubCreate(), FrameStateType::kConstructStub,
+ info.shared_info());
+ Node* create =
+ graph()->NewNode(javascript()->Create(), call.target(), new_target,
+ context, frame_state_inside, effect, control);
+ Node* success = graph()->NewNode(common()->IfSuccess(), create);
+ uncaught_subcalls.push_back(create); // Adds {IfException}.
+ NodeProperties::ReplaceControlInput(node, success);
NodeProperties::ReplaceEffectInput(node, create);
// Insert a check of the return value to determine whether the return
// value or the implicit receiver should be selected as a result of the
@@ -591,42 +683,26 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
NodeProperties::ReplaceValueInput(check, node, 0); // Fix-up input.
receiver = create; // The implicit receiver.
}
-
- // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
- // normal {JSCallFunction} node so that the rest of the inlining machinery
- // behaves as if we were dealing with a regular function invocation.
- new_target = call.new_target(); // Retrieve new target value input.
- node->RemoveInput(call.formal_arguments() + 1); // Drop new target.
- node->InsertInput(graph()->zone(), 1, receiver);
+ node->ReplaceInput(1, receiver);
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
frame_state = CreateArtificialFrameState(
node, frame_state, call.formal_arguments(),
- FrameStateType::kConstructStub, info.shared_info());
+ BailoutId::ConstructStubInvoke(), FrameStateType::kConstructStub,
+ info.shared_info());
}
- // The inlinee specializes to the context from the JSFunction object.
- // TODO(turbofan): We might want to load the context from the JSFunction at
- // runtime in case we only know the SharedFunctionInfo once we have dynamic
- // type feedback in the compiler.
- Node* context = jsgraph()->Constant(handle(function->context()));
-
// Insert a JSConvertReceiver node for sloppy callees. Note that the context
- // passed into this node has to be the callees context (loaded above). Note
- // that the frame state passed to the JSConvertReceiver must be the frame
- // state _before_ the call; it is not necessary to fiddle with the receiver
- // in that frame state tho, as the conversion of the receiver can be repeated
- // any number of times, it's not observable.
- if (node->opcode() == IrOpcode::kJSCallFunction &&
+ // passed into this node has to be the callee's context (loaded above).
+ if (node->opcode() == IrOpcode::kJSCall &&
is_sloppy(shared_info->language_mode()) && !shared_info->native()) {
Node* effect = NodeProperties::GetEffectInput(node);
if (NeedsConvertReceiver(call.receiver(), effect)) {
- const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
- Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
- Node* convert = effect = graph()->NewNode(
- javascript()->ConvertReceiver(p.convert_mode()), call.receiver(),
- context, frame_state_before, effect, start);
+ const CallParameters& p = CallParametersOf(node->op());
+ Node* convert = effect =
+ graph()->NewNode(javascript()->ConvertReceiver(p.convert_mode()),
+ call.receiver(), context, effect, start);
NodeProperties::ReplaceValueInput(node, convert, 1);
NodeProperties::ReplaceEffectInput(node, effect);
}
@@ -639,8 +715,8 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// the case when the outermost function inlines a tail call (it should remove
// potential arguments adaptor frame that belongs to outermost function when
// deopt happens).
- if (node->opcode() == IrOpcode::kJSCallFunction) {
- const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+ if (node->opcode() == IrOpcode::kJSCall) {
+ const CallParameters& p = CallParametersOf(node->op());
if (p.tail_call_mode() == TailCallMode::kAllow) {
frame_state = CreateTailCallerFrameState(node, frame_state);
}
@@ -654,7 +730,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
if (call.formal_arguments() != parameter_count) {
frame_state = CreateArtificialFrameState(
- node, frame_state, call.formal_arguments(),
+ node, frame_state, call.formal_arguments(), BailoutId::None(),
FrameStateType::kArgumentsAdaptor, shared_info);
}
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 9bb8ec4643..e40e6a745e 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-// Forward declarations.
+class BailoutId;
class CompilationInfo;
namespace compiler {
@@ -36,7 +36,7 @@ class JSInliner final : public AdvancedReducer {
// Can be used by inlining heuristics or by testing code directly, without
// using the above generic reducer interface of the inlining machinery.
- Reduction ReduceJSCall(Node* node, Handle<JSFunction> function);
+ Reduction ReduceJSCall(Node* node);
private:
CommonOperatorBuilder* common() const;
@@ -50,8 +50,13 @@ class JSInliner final : public AdvancedReducer {
JSGraph* const jsgraph_;
SourcePositionTable* const source_positions_;
+ bool DetermineCallTarget(Node* node,
+ Handle<SharedFunctionInfo>& shared_info_out);
+ void DetermineCallContext(Node* node, Node*& context_out,
+ Handle<FeedbackVector>& feedback_vector_out);
+
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
- int parameter_count,
+ int parameter_count, BailoutId bailout_id,
FrameStateType frame_state_type,
Handle<SharedFunctionInfo> shared);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 2a7a3a3896..8a866eeec4 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -48,6 +48,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsTypedArray:
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
+ case Runtime::kInlineIsJSProxy:
+ return ReduceIsInstanceType(node, JS_PROXY_TYPE);
case Runtime::kInlineIsJSReceiver:
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
@@ -56,8 +58,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceFixedArrayGet(node);
case Runtime::kInlineFixedArraySet:
return ReduceFixedArraySet(node);
- case Runtime::kInlineRegExpExec:
- return ReduceRegExpExec(node);
case Runtime::kInlineSubString:
return ReduceSubString(node);
case Runtime::kInlineToInteger:
@@ -74,6 +74,27 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceCall(node);
case Runtime::kInlineGetSuperConstructor:
return ReduceGetSuperConstructor(node);
+ case Runtime::kInlineArrayBufferViewGetByteLength:
+ return ReduceArrayBufferViewField(
+ node, AccessBuilder::ForJSArrayBufferViewByteLength());
+ case Runtime::kInlineArrayBufferViewGetByteOffset:
+ return ReduceArrayBufferViewField(
+ node, AccessBuilder::ForJSArrayBufferViewByteOffset());
+ case Runtime::kInlineMaxSmi:
+ return ReduceMaxSmi(node);
+ case Runtime::kInlineTypedArrayGetLength:
+ return ReduceArrayBufferViewField(node,
+ AccessBuilder::ForJSTypedArrayLength());
+ case Runtime::kInlineTypedArrayMaxSizeInHeap:
+ return ReduceTypedArrayMaxSizeInHeap(node);
+ case Runtime::kInlineJSCollectionGetTable:
+ return ReduceJSCollectionGetTable(node);
+ case Runtime::kInlineStringGetRawHashField:
+ return ReduceStringGetRawHashField(node);
+ case Runtime::kInlineTheHole:
+ return ReduceTheHole(node);
+ case Runtime::kInlineClassOf:
+ return ReduceClassOf(node);
default:
break;
}
@@ -247,11 +268,6 @@ Reduction JSIntrinsicLowering::ReduceFixedArraySet(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceRegExpExec(Node* node) {
- return Change(node, CodeFactory::RegExpExec(isolate()), 4);
-}
-
-
Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
return Change(node, CodeFactory::SubString(isolate()), 3);
}
@@ -290,9 +306,9 @@ Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
size_t const arity = CallRuntimeParametersOf(node->op()).arity();
NodeProperties::ChangeOp(
- node, javascript()->CallFunction(arity, 0.0f, VectorSlotPair(),
- ConvertReceiverMode::kAny,
- TailCallMode::kDisallow));
+ node,
+ javascript()->Call(arity, 0.0f, VectorSlotPair(),
+ ConvertReceiverMode::kAny, TailCallMode::kDisallow));
return Changed(node);
}
@@ -307,6 +323,75 @@ Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
active_function_map, effect, control);
}
+Reduction JSIntrinsicLowering::ReduceArrayBufferViewField(
+ Node* node, FieldAccess const& access) {
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Load the {receiver}s field.
+ Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
+ receiver, effect, control);
+
+ // Check if the {receiver}s buffer was neutered.
+ Node* receiver_buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+ Node* check = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), receiver_buffer, effect, control);
+
+ // Default to zero if the {receiver}s buffer was neutered.
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, jsgraph()->ZeroConstant(), value);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
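
The lowering above folds the neutering check into a Select node: the loaded field value is used only while the backing buffer is still alive. A standalone model of the resulting value semantics (plain C++, not TurboFan nodes):

    struct ArrayBufferView {
      bool buffer_was_neutered;  // models the ArrayBufferWasNeutered check
      int byte_length;           // models the LoadField of the view field
    };

    // Models the kFalse-hinted Select: report zero once the buffer is gone.
    int ViewByteLength(const ArrayBufferView& view) {
      return view.buffer_was_neutered ? 0 : view.byte_length;
    }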
+
+Reduction JSIntrinsicLowering::ReduceMaxSmi(Node* node) {
+ Node* value = jsgraph()->Constant(Smi::kMaxValue);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
+Reduction JSIntrinsicLowering::ReduceTypedArrayMaxSizeInHeap(Node* node) {
+ Node* value = jsgraph()->Constant(FLAG_typed_array_max_size_in_heap);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
+Reduction JSIntrinsicLowering::ReduceJSCollectionGetTable(Node* node) {
+ Node* collection = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ return Change(node,
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
+ collection, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceStringGetRawHashField(Node* node) {
+ Node* string = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ return Change(node,
+ simplified()->LoadField(AccessBuilder::ForNameHashField()),
+ string, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceTheHole(Node* node) {
+ Node* value = jsgraph()->TheHoleConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
+Reduction JSIntrinsicLowering::ReduceClassOf(Node* node) {
+ RelaxEffectsAndControls(node);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, javascript()->ClassOf());
+ return Changed(node);
+}
+
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
RelaxControls(node);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 2bc7cafa3d..f3e3e2ab3b 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -21,6 +21,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
+struct FieldAccess;
class JSOperatorBuilder;
class JSGraph;
class SimplifiedOperatorBuilder;
@@ -51,7 +52,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceIsSmi(Node* node);
Reduction ReduceFixedArrayGet(Node* node);
Reduction ReduceFixedArraySet(Node* node);
- Reduction ReduceRegExpExec(Node* node);
Reduction ReduceSubString(Node* node);
Reduction ReduceToInteger(Node* node);
Reduction ReduceToLength(Node* node);
@@ -61,6 +61,22 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceCall(Node* node);
Reduction ReduceGetSuperConstructor(Node* node);
+ // TODO(turbofan): typedarray.js support; drop once TypedArrays are
+ // converted to proper CodeStubAssembler based builtins.
+ Reduction ReduceArrayBufferViewField(Node* node, FieldAccess const& access);
+ Reduction ReduceMaxSmi(Node* node);
+ Reduction ReduceTypedArrayMaxSizeInHeap(Node* node);
+
+ // TODO(turbofan): collection.js support; drop once Maps and Sets are
+ // converted to proper CodeStubAssembler based builtins.
+ Reduction ReduceJSCollectionGetTable(Node* node);
+ Reduction ReduceStringGetRawHashField(Node* node);
+ Reduction ReduceTheHole(Node* node);
+
+ // TODO(turbofan): JavaScript builtins support; drop once all uses of
+ // %_ClassOf in JavaScript builtins are eliminated.
+ Reduction ReduceClassOf(Node* node);
+
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 14ba905e29..c32ee269a0 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -55,6 +55,12 @@ bool HasOnlyStringMaps(T const& maps) {
} // namespace
+struct JSNativeContextSpecialization::ScriptContextTableLookupResult {
+ Handle<Context> context;
+ bool immutable;
+ int index;
+};
+
JSNativeContextSpecialization::JSNativeContextSpecialization(
Editor* editor, JSGraph* jsgraph, Flags flags,
Handle<Context> native_context, CompilationDependencies* dependencies,
@@ -62,6 +68,8 @@ JSNativeContextSpecialization::JSNativeContextSpecialization(
: AdvancedReducer(editor),
jsgraph_(jsgraph),
flags_(flags),
+ global_object_(native_context->global_object()),
+ global_proxy_(JSGlobalProxy::cast(native_context->global_proxy())),
native_context_(native_context),
dependencies_(dependencies),
zone_(zone),
@@ -69,12 +77,20 @@ JSNativeContextSpecialization::JSNativeContextSpecialization(
Reduction JSNativeContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kJSAdd:
+ return ReduceJSAdd(node);
+ case IrOpcode::kJSGetSuperConstructor:
+ return ReduceJSGetSuperConstructor(node);
case IrOpcode::kJSInstanceOf:
return ReduceJSInstanceOf(node);
case IrOpcode::kJSOrdinaryHasInstance:
return ReduceJSOrdinaryHasInstance(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
+ case IrOpcode::kJSLoadGlobal:
+ return ReduceJSLoadGlobal(node);
+ case IrOpcode::kJSStoreGlobal:
+ return ReduceJSStoreGlobal(node);
case IrOpcode::kJSLoadNamed:
return ReduceJSLoadNamed(node);
case IrOpcode::kJSStoreNamed:
@@ -83,6 +99,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSLoadProperty(node);
case IrOpcode::kJSStoreProperty:
return ReduceJSStoreProperty(node);
+ case IrOpcode::kJSStoreNamedOwn:
+ return ReduceJSStoreNamedOwn(node);
case IrOpcode::kJSStoreDataPropertyInLiteral:
return ReduceJSStoreDataPropertyInLiteral(node);
default:
@@ -91,6 +109,65 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return NoChange();
}
+Reduction JSNativeContextSpecialization::ReduceJSAdd(Node* node) {
+ // TODO(turbofan): This has to run together with the inlining and
+ // native context specialization to be able to leverage the string
+ // constant-folding for optimizing property access, but we should
+ // nevertheless find a better home for this at some point.
+ DCHECK_EQ(IrOpcode::kJSAdd, node->opcode());
+
+ // Constant-fold string concatenation.
+ HeapObjectBinopMatcher m(node);
+ if (m.left().HasValue() && m.left().Value()->IsString() &&
+ m.right().HasValue() && m.right().Value()->IsString()) {
+ Handle<String> left = Handle<String>::cast(m.left().Value());
+ Handle<String> right = Handle<String>::cast(m.right().Value());
+ if (left->length() + right->length() <= String::kMaxLength) {
+ Handle<String> result =
+ factory()->NewConsString(left, right).ToHandleChecked();
+ Node* value = jsgraph()->HeapConstant(result);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
+ return NoChange();
+}
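
The fold above turns JSAdd(HeapConstant(left), HeapConstant(right)) into a single HeapConstant when both operands are strings and the result still fits in a string. A standalone sketch of the length guard (the kMaxLength value below is an assumption for illustration; V8 also builds a ConsString instead of copying):

    #include <cstddef>
    #include <optional>
    #include <string>

    constexpr std::size_t kMaxLength = (std::size_t{1} << 28) - 16;  // illustrative only

    std::optional<std::string> TryFoldConcat(const std::string& l, const std::string& r) {
      if (l.size() + r.size() > kMaxLength) return std::nullopt;  // leave JSAdd alone
      return l + r;  // the node is replaced by a constant of the folded string
    }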
+
+Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGetSuperConstructor, node->opcode());
+ Node* constructor = NodeProperties::GetValueInput(node, 0);
+
+ // If deoptimization is disabled, we cannot optimize.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Check if the input is a known JSFunction.
+ HeapObjectMatcher m(constructor);
+ if (!m.HasValue()) return NoChange();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ Handle<Map> function_map(function->map(), isolate());
+ Handle<Object> function_prototype(function_map->prototype(), isolate());
+
+ // We can constant-fold the super constructor access if the
+ // {function}s map is stable, i.e. we can use a code dependency
+ // to guard against [[Prototype]] changes of {function}.
+ if (function_map->is_stable()) {
+ Node* value = jsgraph()->Constant(function_prototype);
+ dependencies()->AssumeMapStable(function_map);
+ if (function_prototype->IsConstructor()) {
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ } else {
+ node->InsertInput(graph()->zone(), 0, value);
+ NodeProperties::ChangeOp(
+ node, javascript()->CallRuntime(Runtime::kThrowNotSuperConstructor));
+ return Changed(node);
+ }
+ }
+
+ return NoChange();
+}
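
With a stable map, the super constructor is just the [[Prototype]] of the derived constructor's map, so the load can be constant-folded behind a map-stability dependency. A standalone model of the three outcomes (fold, lower to a throw, bail out); the types are illustrative stand-ins:

    enum class Outcome { kFold, kThrowNotSuperConstructor, kNoChange };

    struct Prototype { bool is_constructor; };
    struct FunctionMap { bool is_stable; Prototype proto; };

    Outcome ReduceGetSuperConstructorModel(const FunctionMap& map) {
      if (!map.is_stable) return Outcome::kNoChange;  // cannot guard the fold
      return map.proto.is_constructor ? Outcome::kFold
                                      : Outcome::kThrowNotSuperConstructor;
    }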
+
Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
Node* object = NodeProperties::GetValueInput(node, 0);
@@ -161,8 +238,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
node->ReplaceInput(5, effect);
NodeProperties::ChangeOp(
node,
- javascript()->CallFunction(3, 0.0f, VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined));
+ javascript()->Call(3, 0.0f, VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined));
// Rewire the value uses of {node} to ToBoolean conversion of the result.
Node* value = graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
@@ -218,24 +295,292 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
return NoChange();
}
+namespace {
+
+FieldAccess ForPropertyCellValue(MachineRepresentation representation,
+ Type* type, MaybeHandle<Map> map,
+ Handle<Name> name) {
+ WriteBarrierKind kind = kFullWriteBarrier;
+ if (representation == MachineRepresentation::kTaggedSigned) {
+ kind = kNoWriteBarrier;
+ } else if (representation == MachineRepresentation::kTaggedPointer) {
+ kind = kPointerWriteBarrier;
+ }
+ MachineType r = MachineType::TypeForRepresentation(representation);
+ FieldAccess access = {
+ kTaggedBase, PropertyCell::kValueOffset, name, map, type, r, kind};
+ return access;
+}
+
+} // namespace
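
ForPropertyCellValue picks the cheapest write barrier that is still sound for the value representation: Smi-tagged values contain no heap pointers and need none, and known heap pointers can skip the Smi check of the full barrier. A standalone model of that mapping (illustrative enums, not the V8 types):

    enum class Rep { kTaggedSigned, kTaggedPointer, kTagged };
    enum class Barrier { kNone, kPointer, kFull };

    Barrier BarrierFor(Rep rep) {
      switch (rep) {
        case Rep::kTaggedSigned:  return Barrier::kNone;     // Smis hold no pointers
        case Rep::kTaggedPointer: return Barrier::kPointer;  // skip the Smi check
        default:                  return Barrier::kFull;     // unknown: full barrier
      }
    }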
+
+Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
+ Node* node, Node* receiver, Node* value, Handle<Name> name,
+ AccessMode access_mode, Node* index) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Look up the property on the global object. We only deal with own data
+ // properties of the global object here (represented as PropertyCell).
+ LookupIterator it(global_object(), name, LookupIterator::OWN);
+ it.TryLookupCachedProperty();
+ if (it.state() != LookupIterator::DATA) return NoChange();
+ if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
+ Handle<PropertyCell> property_cell = it.GetPropertyCell();
+ PropertyDetails property_details = property_cell->property_details();
+ Handle<Object> property_cell_value(property_cell->value(), isolate());
+ PropertyCellType property_cell_type = property_details.cell_type();
+
+ // We have additional constraints for stores.
+ if (access_mode == AccessMode::kStore) {
+ if (property_details.IsReadOnly()) {
+ // Don't even bother trying to lower stores to read-only data properties.
+ return NoChange();
+ } else if (property_cell_type == PropertyCellType::kUndefined) {
+ // There's no fast-path for dealing with undefined property cells.
+ return NoChange();
+ } else if (property_cell_type == PropertyCellType::kConstantType) {
+ // There's also no fast-path for storing to a global cell which
+ // claimed a stable type but is no longer stable now.
+ if (property_cell_value->IsHeapObject() &&
+ !Handle<HeapObject>::cast(property_cell_value)->map()->is_stable()) {
+ return NoChange();
+ }
+ }
+ }
+
+ // Ensure that {index} matches the specified {name} (if {index} is given).
+ if (index != nullptr) {
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), index,
+ jsgraph()->HeapConstant(name));
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ }
+
+ // Check if we have a {receiver} to validate. If so, we need to check that
+ // the {receiver} is actually the JSGlobalProxy for the native context that
+ // we are specializing to.
+ if (receiver != nullptr) {
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
+ jsgraph()->HeapConstant(global_proxy()));
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ }
+
+ if (access_mode == AccessMode::kLoad) {
+ // A load from a non-configurable, read-only data property on the
+ // global object can be constant-folded, even without deoptimization
+ // support.
+ if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
+ value = jsgraph()->Constant(property_cell_value);
+ } else {
+ // Record a code dependency on the cell if we can benefit from the
+ // additional feedback, or if the global property is configurable
+ // (i.e. it can be deleted or reconfigured to an accessor property).
+ if (property_details.cell_type() != PropertyCellType::kMutable ||
+ property_details.IsConfigurable()) {
+ dependencies()->AssumePropertyCell(property_cell);
+ }
+
+ // A load from a constant/undefined global property can be constant-folded.
+ if (property_details.cell_type() == PropertyCellType::kConstant ||
+ property_details.cell_type() == PropertyCellType::kUndefined) {
+ value = jsgraph()->Constant(property_cell_value);
+ } else {
+ // A load from a constant-type cell can benefit from type feedback.
+ MaybeHandle<Map> map;
+ Type* property_cell_value_type = Type::NonInternal();
+ MachineRepresentation representation = MachineRepresentation::kTagged;
+ if (property_details.cell_type() == PropertyCellType::kConstantType) {
+ // Compute proper type based on the current value in the cell.
+ if (property_cell_value->IsSmi()) {
+ property_cell_value_type = Type::SignedSmall();
+ representation = MachineRepresentation::kTaggedSigned;
+ } else if (property_cell_value->IsNumber()) {
+ property_cell_value_type = Type::Number();
+ representation = MachineRepresentation::kTaggedPointer;
+ } else {
+ Handle<Map> property_cell_value_map(
+ Handle<HeapObject>::cast(property_cell_value)->map(),
+ isolate());
+ property_cell_value_type = Type::For(property_cell_value_map);
+ representation = MachineRepresentation::kTaggedPointer;
+
+ // We can only use the property cell value map for map check
+ // elimination if it's stable, i.e. the HeapObject wasn't
+ // mutated without the cell state being updated.
+ if (property_cell_value_map->is_stable()) {
+ dependencies()->AssumeMapStable(property_cell_value_map);
+ map = property_cell_value_map;
+ }
+ }
+ }
+ value = effect = graph()->NewNode(
+ simplified()->LoadField(ForPropertyCellValue(
+ representation, property_cell_value_type, map, name)),
+ jsgraph()->HeapConstant(property_cell), effect, control);
+ }
+ }
+ } else {
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+ DCHECK(!property_details.IsReadOnly());
+ switch (property_details.cell_type()) {
+ case PropertyCellType::kUndefined: {
+ UNREACHABLE();
+ break;
+ }
+ case PropertyCellType::kConstant: {
+ // Record a code dependency on the cell, and just deoptimize if the new
+ // value doesn't match the previous value stored inside the cell.
+ dependencies()->AssumePropertyCell(property_cell);
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), value,
+ jsgraph()->Constant(property_cell_value));
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ break;
+ }
+ case PropertyCellType::kConstantType: {
+ // Record a code dependency on the cell, and just deoptimize if the new
+ // value's type doesn't match the type of the previous value in the
+ // cell.
+ dependencies()->AssumePropertyCell(property_cell);
+ Type* property_cell_value_type;
+ MachineRepresentation representation = MachineRepresentation::kTagged;
+ if (property_cell_value->IsHeapObject()) {
+ // We cannot do anything if the {property_cell_value}'s map is no
+ // longer stable.
+ Handle<Map> property_cell_value_map(
+ Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+ DCHECK(property_cell_value_map->is_stable());
+ dependencies()->AssumeMapStable(property_cell_value_map);
+
+ // Check that the {value} is a HeapObject.
+ value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ value, effect, control);
+
+ // Check the {value} map against the {property_cell} map.
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(
+ CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(property_cell_value_map)),
+ value, effect, control);
+ property_cell_value_type = Type::OtherInternal();
+ representation = MachineRepresentation::kTaggedPointer;
+ } else {
+ // Check that the {value} is a Smi.
+ value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
+ effect, control);
+ property_cell_value_type = Type::SignedSmall();
+ representation = MachineRepresentation::kTaggedSigned;
+ }
+ effect = graph()->NewNode(simplified()->StoreField(ForPropertyCellValue(
+ representation, property_cell_value_type,
+ MaybeHandle<Map>(), name)),
+ jsgraph()->HeapConstant(property_cell), value,
+ effect, control);
+ break;
+ }
+ case PropertyCellType::kMutable: {
+ // Record a code dependency on the cell, and just deoptimize if the
+ // property ever becomes read-only.
+ dependencies()->AssumePropertyCell(property_cell);
+ effect = graph()->NewNode(
+ simplified()->StoreField(ForPropertyCellValue(
+ MachineRepresentation::kTagged, Type::NonInternal(),
+ MaybeHandle<Map>(), name)),
+ jsgraph()->HeapConstant(property_cell), value, effect, control);
+ break;
+ }
+ }
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
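A minimal sketch of what ReduceGlobalAccess enables, assuming a script-level `var` (which creates an own data property on the global object, backed by a PropertyCell; names are hypothetical):

    var counter = 0;   // Own data property of the global object,
                       // represented by a PropertyCell.
    function tick() {
      // With deoptimization support enabled, both the load and the store
      // below lower to direct PropertyCell accesses whose guards (Smi
      // check, map check, value check) are derived from the cell's
      // PropertyCellType.
      counter = counter + 1;
    }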
+Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
+ Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Try to look up the name in the script context table first (lexical scoping).
+ ScriptContextTableLookupResult result;
+ if (LookupInScriptContextTable(name, &result)) {
+ if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
+ Node* context = jsgraph()->HeapConstant(result.context);
+ Node* value = effect = graph()->NewNode(
+ javascript()->LoadContext(0, result.index, result.immutable), context,
+ effect);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Look up the {name} on the global object instead.
+ return ReduceGlobalAccess(node, nullptr, nullptr, name, AccessMode::kLoad);
+}
+
+Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
+ Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to look up the name in the script context table first (lexical scoping).
+ ScriptContextTableLookupResult result;
+ if (LookupInScriptContextTable(name, &result)) {
+ if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
+ if (result.immutable) return NoChange();
+ Node* context = jsgraph()->HeapConstant(result.context);
+ effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
+ value, context, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Look up the {name} on the global object instead.
+ return ReduceGlobalAccess(node, nullptr, value, name, AccessMode::kStore);
+}
+
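Both reducers try lexical (script-context) bindings before falling back to the global object. An illustrative sketch (identifiers are hypothetical):

    let limit = 10;    // Script-context slot: JSLoadGlobal/JSStoreGlobal
                       // reduce to plain JSLoadContext/JSStoreContext.
    const BASE = 16;   // Immutable slot: loads carry the immutable flag,
                       // and stores bail out with NoChange.
    var total = 0;     // Global-object property: falls through to
                       // ReduceGlobalAccess and its PropertyCell.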
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, MapHandleList const& receiver_maps,
Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
- Handle<FeedbackVector> vector, FeedbackVectorSlot slot, Node* index) {
+ Handle<FeedbackVector> vector, FeedbackSlot slot, Node* index) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
node->opcode() == IrOpcode::kJSLoadProperty ||
- node->opcode() == IrOpcode::kJSStoreProperty);
+ node->opcode() == IrOpcode::kJSStoreProperty ||
+ node->opcode() == IrOpcode::kJSStoreNamedOwn);
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state_eager = NodeProperties::FindFrameStateBefore(node);
- Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Not much we can do if deoptimization support is disabled.
if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ // Check if we have an access o.x or o.x=v where o is the current
+ // native context's global proxy, and turn that into a direct access
+ // to the current native context's global object instead.
+ if (receiver_maps.length() == 1) {
+ Handle<Map> receiver_map = receiver_maps.first();
+ if (receiver_map->IsJSGlobalProxyMap()) {
+ Object* maybe_constructor = receiver_map->GetConstructor();
+ // Detached global proxies have |null| as their constructor.
+ if (maybe_constructor->IsJSFunction() &&
+ JSFunction::cast(maybe_constructor)->native_context() ==
+ *native_context()) {
+ return ReduceGlobalAccess(node, receiver, value, name, access_mode,
+ index);
+ }
+ }
+ }
+
// Compute property access infos for the receiver maps.
AccessInfoFactory access_info_factory(dependencies(), native_context(),
graph()->zone());
@@ -257,7 +602,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// We do not handle generic calls in try blocks.
if (is_exceptional) return NoChange();
// We only handle the generic store IC case.
- if (vector->GetKind(slot) != FeedbackVectorSlotKind::STORE_IC) {
+ if (!vector->IsStoreIC(slot)) {
return NoChange();
}
}
@@ -297,7 +642,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
- receiver, value, context, frame_state_lazy, effect, control, name,
+ receiver, value, context, frame_state, effect, control, name,
access_info, access_mode, language_mode, vector, slot);
value = continuation.value();
effect = continuation.effect();
@@ -397,20 +742,14 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
this_effect =
graph()->NewNode(common()->EffectPhi(this_control_count),
this_control_count + 1, &this_effects.front());
-
- // TODO(turbofan): The effect/control linearization will not find a
- // FrameState after the EffectPhi that is generated above.
- this_effect =
- graph()->NewNode(common()->Checkpoint(), frame_state_eager,
- this_effect, this_control);
}
}
// Generate the actual property access.
- ValueEffectControl continuation = BuildPropertyAccess(
- this_receiver, this_value, context, frame_state_lazy, this_effect,
- this_control, name, access_info, access_mode, language_mode, vector,
- slot);
+ ValueEffectControl continuation =
+ BuildPropertyAccess(this_receiver, this_value, context, frame_state,
+ this_effect, this_control, name, access_info,
+ access_mode, language_mode, vector, slot);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -446,10 +785,20 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
Node* node, Node* value, FeedbackNexus const& nexus, Handle<Name> name,
AccessMode access_mode, LanguageMode language_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
- node->opcode() == IrOpcode::kJSStoreNamed);
+ node->opcode() == IrOpcode::kJSStoreNamed ||
+ node->opcode() == IrOpcode::kJSStoreNamedOwn);
Node* const receiver = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
+ if (flags() & kDeoptimizationEnabled) {
+ // Check if we are accessing the current native context's global proxy.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue() && m.Value().is_identical_to(global_proxy())) {
+ // Optimize accesses to the current native context's global proxy.
+ return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
+ }
+ }
+
// Check if the {nexus} reports type feedback for the IC.
if (nexus.IsUninitialized()) {
if ((flags() & kDeoptimizationEnabled) &&
@@ -480,7 +829,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
language_mode, nexus.vector_handle(), nexus.slot());
}
-
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
@@ -542,6 +890,19 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
AccessMode::kStore, p.language_mode());
}
+Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode());
+ StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
+ Node* const value = NodeProperties::GetValueInput(node, 1);
+
+ // Extract receiver maps from the IC using the StoreOwnICNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ StoreOwnICNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ // Try to lower the creation of a named property based on the {receiver_maps}.
+ return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
+ AccessMode::kStoreInLiteral, STRICT);
+}
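JSStoreNamedOwn is emitted for own-property definitions such as those in object literals; a hedged sketch of the source pattern (names are hypothetical):

    function make(x, y) {
      // Each property definition in the literal is a JSStoreNamedOwn fed
      // by a StoreOwnIC slot, and is lowered here with
      // AccessMode::kStoreInLiteral rather than as an ordinary store.
      return { x: x, y: y };
    }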
Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* node, Node* index, Node* value, MapHandleList const& receiver_maps,
@@ -746,11 +1107,6 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
this_effect =
graph()->NewNode(common()->EffectPhi(this_control_count),
this_control_count + 1, &this_effects.front());
-
- // TODO(turbofan): The effect/control linearization will not find a
- // FrameState after the EffectPhi that is generated above.
- this_effect = graph()->NewNode(common()->Checkpoint(), frame_state,
- this_effect, this_control);
}
}
@@ -964,7 +1320,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
Node* control, Handle<Name> name, PropertyAccessInfo const& access_info,
AccessMode access_mode, LanguageMode language_mode,
- Handle<FeedbackVector> vector, FeedbackVectorSlot slot) {
+ Handle<FeedbackVector> vector, FeedbackSlot slot) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
@@ -1010,9 +1366,8 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Introduce the call to the getter function.
if (access_info.constant()->IsJSFunction()) {
value = effect = graph()->NewNode(
- javascript()->CallFunction(
- 2, 0.0f, VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined),
+ javascript()->Call(2, 0.0f, VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, context, frame_state0, effect, control);
control = graph()->NewNode(common()->IfSuccess(), value);
} else {
@@ -1048,9 +1403,8 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Introduce the call to the setter function.
if (access_info.constant()->IsJSFunction()) {
effect = graph()->NewNode(
- javascript()->CallFunction(
- 3, 0.0f, VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined),
+ javascript()->Call(3, 0.0f, VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, value, context, frame_state0, effect, control);
control = graph()->NewNode(common()->IfSuccess(), effect);
} else {
@@ -1068,7 +1422,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
break;
}
}
- } else if (access_info.IsDataField()) {
+ } else if (access_info.IsDataField() || access_info.IsDataConstantField()) {
FieldIndex const field_index = access_info.field_index();
Type* const field_type = access_info.field_type();
MachineRepresentation const field_representation =
@@ -1093,10 +1447,23 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// but for now let's just do what Crankshaft does.
LookupIterator it(m.Value(), name,
LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.state() == LookupIterator::DATA && it.IsReadOnly() &&
- !it.IsConfigurable()) {
- Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
- return ValueEffectControl(value, effect, control);
+ if (it.state() == LookupIterator::DATA) {
+ bool is_readonly_non_configurable =
+ it.IsReadOnly() && !it.IsConfigurable();
+ if (is_readonly_non_configurable ||
+ (FLAG_track_constant_fields &&
+ access_info.IsDataConstantField())) {
+ Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
+ if (!is_readonly_non_configurable) {
+ // It's necessary to add a dependency on the map that introduced
+ // the field.
+ DCHECK(access_info.IsDataConstantField());
+ DCHECK(!it.is_dictionary_holder());
+ Handle<Map> field_owner_map = it.GetFieldOwnerMap();
+ dependencies()->AssumeFieldOwner(field_owner_map);
+ }
+ return ValueEffectControl(value, effect, control);
+ }
}
}
}
@@ -1146,7 +1513,12 @@ JSNativeContextSpecialization::BuildPropertyAccess(
value = effect = graph()->NewNode(simplified()->LoadField(field_access),
storage, effect, control);
} else {
- DCHECK_EQ(AccessMode::kStore, access_mode);
+ bool store_to_constant_field = FLAG_track_constant_fields &&
+ (access_mode == AccessMode::kStore) &&
+ access_info.IsDataConstantField();
+
+ DCHECK(access_mode == AccessMode::kStore ||
+ access_mode == AccessMode::kStoreInLiteral);
switch (field_representation) {
case MachineRepresentation::kFloat64: {
value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
@@ -1191,29 +1563,62 @@ JSNativeContextSpecialization::BuildPropertyAccess(
field_access.machine_type = MachineType::Float64();
}
}
- break;
- }
- case MachineRepresentation::kTaggedSigned: {
- value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
- effect, control);
- field_access.write_barrier_kind = kNoWriteBarrier;
- break;
- }
- case MachineRepresentation::kTaggedPointer: {
- // Ensure that {value} is a HeapObject.
- value = BuildCheckHeapObject(value, &effect, control);
- Handle<Map> field_map;
- if (access_info.field_map().ToHandle(&field_map)) {
- // Emit a map check for the value.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone,
- ZoneHandleSet<Map>(field_map)),
- value, effect, control);
+ if (store_to_constant_field) {
+ DCHECK(!access_info.HasTransitionMap());
+ // If the field is constant, check that the value we are going
+ // to store matches the current value.
+ Node* current_value = effect =
+ graph()->NewNode(simplified()->LoadField(field_access), storage,
+ effect, control);
+
+ Node* check = graph()->NewNode(simplified()->NumberEqual(),
+ current_value, value);
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
+ control);
+ return ValueEffectControl(value, effect, control);
}
- field_access.write_barrier_kind = kPointerWriteBarrier;
break;
}
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ if (store_to_constant_field) {
+ DCHECK(!access_info.HasTransitionMap());
+ // If the field is constant, check that the value we are going
+ // to store matches the current value.
+ Node* current_value = effect =
+ graph()->NewNode(simplified()->LoadField(field_access), storage,
+ effect, control);
+
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
+ current_value, value);
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
+ control);
+ return ValueEffectControl(value, effect, control);
+ }
+
+ if (field_representation == MachineRepresentation::kTaggedSigned) {
+ value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
+ effect, control);
+ field_access.write_barrier_kind = kNoWriteBarrier;
+
+ } else if (field_representation ==
+ MachineRepresentation::kTaggedPointer) {
+ // Ensure that {value} is a HeapObject.
+ value = BuildCheckHeapObject(value, &effect, control);
+ Handle<Map> field_map;
+ if (access_info.field_map().ToHandle(&field_map)) {
+ // Emit a map check for the value.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(field_map)),
+ value, effect, control);
+ }
+ field_access.write_barrier_kind = kPointerWriteBarrier;
+
+ } else {
+ DCHECK_EQ(MachineRepresentation::kTagged, field_representation);
+ }
break;
case MachineRepresentation::kNone:
case MachineRepresentation::kBit:
@@ -1223,6 +1628,9 @@ JSNativeContextSpecialization::BuildPropertyAccess(
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
UNREACHABLE();
break;
}
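With FLAG_track_constant_fields, a store to a field currently tracked as constant performs no memory write at all: the incoming value is compared against the stored one (NumberEqual for double fields, ReferenceEqual for tagged ones) and a mismatch deoptimizes. Roughly, at the JavaScript level (names are hypothetical):

    function Point(x) { this.x = x; }
    var p = new Point(1);
    p.x = 1;   // Matches the tracked constant value: the store reduces
               // to a check and no write is emitted.
    p.x = 2;   // Fails the check and deoptimizes; the field is then
               // generalized to a regular mutable field.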
@@ -1244,7 +1652,8 @@ JSNativeContextSpecialization::BuildPropertyAccess(
} else {
DCHECK(access_info.IsGeneric());
DCHECK_EQ(AccessMode::kStore, access_mode);
- DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
+ DCHECK(vector->IsStoreIC(slot));
+ DCHECK_EQ(vector->GetLanguageMode(slot), language_mode);
Callable callable =
CodeFactory::StoreICInOptimizedCode(isolate(), language_mode);
const CallInterfaceDescriptor& descriptor = callable.descriptor();
@@ -1291,7 +1700,13 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
DCHECK_EQ(MONOMORPHIC, nexus.ic_state());
- Handle<Map> receiver_map(nexus.FindFirstMap(), isolate());
+ Map* map = nexus.FindFirstMap();
+ if (map == nullptr) {
+ // Maps are weakly held in the type feedback vector, so we may not have one.
+ return NoChange();
+ }
+
+ Handle<Map> receiver_map(map, isolate());
Handle<Name> cached_name =
handle(Name::cast(nexus.GetFeedbackExtra()), isolate());
@@ -1447,7 +1862,6 @@ JSNativeContextSpecialization::BuildElementAccess(
effect, control);
} else {
// Check that the {index} is in the valid range for the {receiver}.
- DCHECK_EQ(STANDARD_STORE, store_mode);
index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
length, effect, control);
}
@@ -1505,7 +1919,6 @@ JSNativeContextSpecialization::BuildElementAccess(
graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
} else {
// Perform the actual store
- DCHECK_EQ(STANDARD_STORE, store_mode);
effect = graph()->NewNode(
simplified()->StoreTypedElement(external_array_type), buffer,
base_pointer, external_pointer, index, value, effect, control);
@@ -1820,15 +2233,14 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
MapHandleList* receiver_maps) {
DCHECK_EQ(0, receiver_maps->length());
// See if we can infer a concrete type for the {receiver}.
- Handle<Map> receiver_map;
- if (InferReceiverMap(receiver, effect).ToHandle(&receiver_map)) {
- // We can assume that the {receiver} still has the infered {receiver_map}.
- receiver_maps->Add(receiver_map);
+ if (InferReceiverMaps(receiver, effect, receiver_maps)) {
+ // We can assume that the {receiver} still has the inferred {receiver_maps}.
return true;
}
// Try to extract some maps from the {nexus}.
if (nexus.ExtractMaps(receiver_maps) != 0) {
// Try to filter impossible candidates based on the inferred root map.
+ Handle<Map> receiver_map;
if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
for (int i = receiver_maps->length(); --i >= 0;) {
if (receiver_maps->at(i)->FindRootMap() != *receiver_map) {
@@ -1841,38 +2253,28 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
return false;
}
-MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverMap(Node* receiver,
- Node* effect) {
- HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
- Handle<Map> receiver_map(m.Value()->map(), isolate());
- if (receiver_map->is_stable()) return receiver_map;
- } else if (m.IsJSCreate()) {
- HeapObjectMatcher mtarget(m.InputAt(0));
- HeapObjectMatcher mnewtarget(m.InputAt(1));
- if (mtarget.HasValue() && mnewtarget.HasValue()) {
- Handle<JSFunction> original_constructor =
- Handle<JSFunction>::cast(mnewtarget.Value());
- if (original_constructor->has_initial_map()) {
- Handle<Map> initial_map(original_constructor->initial_map(), isolate());
- if (initial_map->constructor_or_backpointer() == *mtarget.Value()) {
- // Walk up the {effect} chain to see if the {receiver} is the
- // dominating effect and there's no other observable write in
- // between.
- while (true) {
- if (receiver == effect) return initial_map;
- if (!effect->op()->HasProperty(Operator::kNoWrite) ||
- effect->op()->EffectInputCount() != 1) {
- break;
- }
- effect = NodeProperties::GetEffectInput(effect);
- }
- }
- }
+bool JSNativeContextSpecialization::InferReceiverMaps(
+ Node* receiver, Node* effect, MapHandleList* receiver_maps) {
+ ZoneHandleSet<Map> maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+ if (result == NodeProperties::kReliableReceiverMaps) {
+ for (size_t i = 0; i < maps.size(); ++i) {
+ receiver_maps->Add(maps[i]);
}
+ return true;
+ } else if (result == NodeProperties::kUnreliableReceiverMaps) {
+ // For unreliable receiver maps, we can still use the information
+ // if the maps are stable.
+ for (size_t i = 0; i < maps.size(); ++i) {
+ if (!maps[i]->is_stable()) return false;
+ }
+ for (size_t i = 0; i < maps.size(); ++i) {
+ receiver_maps->Add(maps[i]);
+ }
+ return true;
}
- // TODO(turbofan): Go hunting for CheckMaps(receiver) in the effect chain?
- return MaybeHandle<Map>();
+ return false;
}
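The reliable/unreliable distinction matters because an effectful operation between the map-producing node and the access could have changed the receiver's map; unreliable maps are therefore only used when every candidate is stable, in which case any map transition would invalidate the optimized code anyway. A sketch (names are hypothetical):

    function f(o) {
      o.a;       // Maps inferred here are reliable at this access.
      mutate();  // After an arbitrary call, the same maps are only
      o.b;       // unreliable: usable solely if all of them are stable.
    }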
MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
@@ -1898,6 +2300,24 @@ MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
return MaybeHandle<Map>();
}
+bool JSNativeContextSpecialization::LookupInScriptContextTable(
+ Handle<Name> name, ScriptContextTableLookupResult* result) {
+ if (!name->IsString()) return false;
+ Handle<ScriptContextTable> script_context_table(
+ global_object()->native_context()->script_context_table(), isolate());
+ ScriptContextTable::LookupResult lookup_result;
+ if (!ScriptContextTable::Lookup(script_context_table,
+ Handle<String>::cast(name), &lookup_result)) {
+ return false;
+ }
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ script_context_table, lookup_result.context_index);
+ result->context = script_context;
+ result->immutable = lookup_result.mode == CONST;
+ result->index = lookup_result.slot_index;
+ return true;
+}
+
Graph* JSNativeContextSpecialization::graph() const {
return jsgraph()->graph();
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 21889eda44..249c52d4e3 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -53,13 +53,18 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceJSAdd(Node* node);
+ Reduction ReduceJSGetSuperConstructor(Node* node);
Reduction ReduceJSInstanceOf(Node* node);
Reduction ReduceJSOrdinaryHasInstance(Node* node);
Reduction ReduceJSLoadContext(Node* node);
+ Reduction ReduceJSLoadGlobal(Node* node);
+ Reduction ReduceJSStoreGlobal(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSStoreNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
+ Reduction ReduceJSStoreNamedOwn(Node* node);
Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
@@ -81,8 +86,11 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
MapHandleList const& receiver_maps,
Handle<Name> name, AccessMode access_mode,
LanguageMode language_mode,
- Handle<FeedbackVector> vector,
- FeedbackVectorSlot slot, Node* index = nullptr);
+ Handle<FeedbackVector> vector, FeedbackSlot slot,
+ Node* index = nullptr);
+ Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
+ Handle<Name> name, AccessMode access_mode,
+ Node* index = nullptr);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
@@ -108,7 +116,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Node* effect, Node* control, Handle<Name> name,
PropertyAccessInfo const& access_info, AccessMode access_mode,
LanguageMode language_mode, Handle<FeedbackVector> vector,
- FeedbackVectorSlot slot);
+ FeedbackSlot slot);
// Construct the appropriate subgraph for element access.
ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
@@ -141,10 +149,12 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
FeedbackNexus const& nexus,
MapHandleList* receiver_maps);
- // Try to infer a map for the given {receiver} at the current {effect}.
- // If a map is returned then you can be sure that the {receiver} definitely
- // has the returned map at this point in the program (identified by {effect}).
- MaybeHandle<Map> InferReceiverMap(Node* receiver, Node* effect);
+ // Try to infer maps for the given {receiver} at the current {effect}.
+ // If maps are returned then you can be sure that the {receiver} definitely
+ // has one of the returned maps at this point in the program (identified
+ // by {effect}).
+ bool InferReceiverMaps(Node* receiver, Node* effect,
+ MapHandleList* receiver_maps);
// Try to infer a root map for the {receiver} independent of the current
// program location.
MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
@@ -155,6 +165,11 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Handle<SharedFunctionInfo> shared_info,
Handle<FunctionTemplateInfo> function_template_info);
+ // Script context lookup logic.
+ struct ScriptContextTableLookupResult;
+ bool LookupInScriptContextTable(Handle<Name> name,
+ ScriptContextTableLookupResult* result);
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
@@ -164,12 +179,16 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
SimplifiedOperatorBuilder* simplified() const;
MachineOperatorBuilder* machine() const;
Flags flags() const { return flags_; }
+ Handle<JSGlobalObject> global_object() const { return global_object_; }
+ Handle<JSGlobalProxy> global_proxy() const { return global_proxy_; }
Handle<Context> native_context() const { return native_context_; }
CompilationDependencies* dependencies() const { return dependencies_; }
Zone* zone() const { return zone_; }
JSGraph* const jsgraph_;
Flags const flags_;
+ Handle<JSGlobalObject> global_object_;
+ Handle<JSGlobalProxy> global_proxy_;
Handle<Context> native_context_;
CompilationDependencies* const dependencies_;
Zone* const zone_;
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index a635c17cf1..a8f5692d54 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -52,72 +52,99 @@ ToBooleanHints ToBooleanHintsOf(Operator const* op) {
return OpParameter<ToBooleanHints>(op);
}
-
-bool operator==(CallConstructParameters const& lhs,
- CallConstructParameters const& rhs) {
+bool operator==(ConstructParameters const& lhs,
+ ConstructParameters const& rhs) {
return lhs.arity() == rhs.arity() && lhs.frequency() == rhs.frequency() &&
lhs.feedback() == rhs.feedback();
}
-
-bool operator!=(CallConstructParameters const& lhs,
- CallConstructParameters const& rhs) {
+bool operator!=(ConstructParameters const& lhs,
+ ConstructParameters const& rhs) {
return !(lhs == rhs);
}
-
-size_t hash_value(CallConstructParameters const& p) {
+size_t hash_value(ConstructParameters const& p) {
return base::hash_combine(p.arity(), p.frequency(), p.feedback());
}
-
-std::ostream& operator<<(std::ostream& os, CallConstructParameters const& p) {
+std::ostream& operator<<(std::ostream& os, ConstructParameters const& p) {
return os << p.arity() << ", " << p.frequency();
}
-
-CallConstructParameters const& CallConstructParametersOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSCallConstruct, op->opcode());
- return OpParameter<CallConstructParameters>(op);
+ConstructParameters const& ConstructParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSConstruct, op->opcode());
+ return OpParameter<ConstructParameters>(op);
}
-bool operator==(CallConstructWithSpreadParameters const& lhs,
- CallConstructWithSpreadParameters const& rhs) {
+bool operator==(ConstructWithSpreadParameters const& lhs,
+ ConstructWithSpreadParameters const& rhs) {
return lhs.arity() == rhs.arity();
}
-bool operator!=(CallConstructWithSpreadParameters const& lhs,
- CallConstructWithSpreadParameters const& rhs) {
+bool operator!=(ConstructWithSpreadParameters const& lhs,
+ ConstructWithSpreadParameters const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(CallConstructWithSpreadParameters const& p) {
+size_t hash_value(ConstructWithSpreadParameters const& p) {
return base::hash_combine(p.arity());
}
std::ostream& operator<<(std::ostream& os,
- CallConstructWithSpreadParameters const& p) {
+ ConstructWithSpreadParameters const& p) {
return os << p.arity();
}
-CallConstructWithSpreadParameters const& CallConstructWithSpreadParametersOf(
+ConstructWithSpreadParameters const& ConstructWithSpreadParametersOf(
Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSCallConstructWithSpread, op->opcode());
- return OpParameter<CallConstructWithSpreadParameters>(op);
+ DCHECK_EQ(IrOpcode::kJSConstructWithSpread, op->opcode());
+ return OpParameter<ConstructWithSpreadParameters>(op);
}
-std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
+std::ostream& operator<<(std::ostream& os, CallParameters const& p) {
os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode() << ", "
<< p.tail_call_mode();
return os;
}
+const CallParameters& CallParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCall, op->opcode());
+ return OpParameter<CallParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CallForwardVarargsParameters const& p) {
+ return os << p.start_index() << ", " << p.tail_call_mode();
+}
-const CallFunctionParameters& CallFunctionParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, op->opcode());
- return OpParameter<CallFunctionParameters>(op);
+CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCallForwardVarargs, op->opcode());
+ return OpParameter<CallForwardVarargsParameters>(op);
}
+bool operator==(CallWithSpreadParameters const& lhs,
+ CallWithSpreadParameters const& rhs) {
+ return lhs.arity() == rhs.arity();
+}
+
+bool operator!=(CallWithSpreadParameters const& lhs,
+ CallWithSpreadParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CallWithSpreadParameters const& p) {
+ return base::hash_combine(p.arity());
+}
+
+std::ostream& operator<<(std::ostream& os, CallWithSpreadParameters const& p) {
+ return os << p.arity();
+}
+
+CallWithSpreadParameters const& CallWithSpreadParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCallWithSpread, op->opcode());
+ return OpParameter<CallWithSpreadParameters>(op);
+}
bool operator==(CallRuntimeParameters const& lhs,
CallRuntimeParameters const& rhs) {
@@ -247,6 +274,30 @@ CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
return OpParameter<CreateFunctionContextParameters>(op);
}
+bool operator==(StoreNamedOwnParameters const& lhs,
+ StoreNamedOwnParameters const& rhs) {
+ return lhs.name().location() == rhs.name().location() &&
+ lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(StoreNamedOwnParameters const& lhs,
+ StoreNamedOwnParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(StoreNamedOwnParameters const& p) {
+ return base::hash_combine(p.name().location(), p.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os, StoreNamedOwnParameters const& p) {
+ return os << Brief(*p.name());
+}
+
+StoreNamedOwnParameters const& StoreNamedOwnParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, op->opcode());
+ return OpParameter<StoreNamedOwnParameters>(op);
+}
+
bool operator==(DataPropertyParameters const& lhs,
DataPropertyParameters const& rhs) {
return lhs.feedback() == rhs.feedback();
@@ -491,17 +542,7 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
}
BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
- op->opcode() == IrOpcode::kJSBitwiseXor ||
- op->opcode() == IrOpcode::kJSBitwiseAnd ||
- op->opcode() == IrOpcode::kJSShiftLeft ||
- op->opcode() == IrOpcode::kJSShiftRight ||
- op->opcode() == IrOpcode::kJSShiftRightLogical ||
- op->opcode() == IrOpcode::kJSAdd ||
- op->opcode() == IrOpcode::kJSSubtract ||
- op->opcode() == IrOpcode::kJSMultiply ||
- op->opcode() == IrOpcode::kJSDivide ||
- op->opcode() == IrOpcode::kJSModulus);
+ DCHECK_EQ(IrOpcode::kJSAdd, op->opcode());
return OpParameter<BinaryOperationHint>(op);
}
@@ -518,16 +559,27 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
}
#define CACHED_OP_LIST(V) \
+ V(BitwiseOr, Operator::kNoProperties, 2, 1) \
+ V(BitwiseXor, Operator::kNoProperties, 2, 1) \
+ V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
+ V(ShiftLeft, Operator::kNoProperties, 2, 1) \
+ V(ShiftRight, Operator::kNoProperties, 2, 1) \
+ V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
+ V(Subtract, Operator::kNoProperties, 2, 1) \
+ V(Multiply, Operator::kNoProperties, 2, 1) \
+ V(Divide, Operator::kNoProperties, 2, 1) \
+ V(Modulus, Operator::kNoProperties, 2, 1) \
V(ToInteger, Operator::kNoProperties, 1, 1) \
V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
V(ToObject, Operator::kFoldable, 1, 1) \
V(ToString, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kEliminatable, 2, 1) \
+ V(Create, Operator::kNoProperties, 2, 1) \
V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(ClassOf, Operator::kPure, 1, 1) \
V(TypeOf, Operator::kPure, 1, 1) \
V(InstanceOf, Operator::kNoProperties, 2, 1) \
V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
@@ -537,20 +589,10 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
V(StackCheck, Operator::kNoWrite, 0, 0) \
+ V(Debugger, Operator::kNoProperties, 0, 0) \
V(GetSuperConstructor, Operator::kNoWrite, 1, 1)
-#define BINARY_OP_LIST(V) \
- V(BitwiseOr) \
- V(BitwiseXor) \
- V(BitwiseAnd) \
- V(ShiftLeft) \
- V(ShiftRight) \
- V(ShiftRightLogical) \
- V(Add) \
- V(Subtract) \
- V(Multiply) \
- V(Divide) \
- V(Modulus)
+#define BINARY_OP_LIST(V) V(Add)
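After this change JSAdd is the only binary operator still parameterized by a BinaryOperationHint; the others become cached, hint-free operators above. A plausible reading is that addition is the one operator whose feedback must separate numeric addition from string concatenation, which reductions such as ReduceJSAdd exploit. Illustrative (names are hypothetical):

    function add(a, b) { return a + b; }
    add(1, 2);       // Feedback stays SignedSmall: numeric fast path.
    add("a", "b");   // Feedback widens toward String: concatenation.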
#define COMPARE_OP_LIST(V) \
V(Equal, Operator::kNoProperties) \
@@ -595,23 +637,24 @@ struct JSOperatorGlobalCache final {
BINARY_OP_LIST(BINARY_OP)
#undef BINARY_OP
-#define COMPARE_OP(Name, properties) \
- template <CompareOperationHint kHint> \
- struct Name##Operator final : public Operator1<CompareOperationHint> { \
- Name##Operator() \
- : Operator1<CompareOperationHint>( \
- IrOpcode::kJS##Name, properties, "JS" #Name, 2, 1, 1, 1, 1, \
- Operator::ZeroIfNoThrow(properties), kHint) {} \
- }; \
- Name##Operator<CompareOperationHint::kNone> k##Name##NoneOperator; \
- Name##Operator<CompareOperationHint::kSignedSmall> \
- k##Name##SignedSmallOperator; \
- Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator; \
- Name##Operator<CompareOperationHint::kNumberOrOddball> \
- k##Name##NumberOrOddballOperator; \
- Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
- Name##Operator<CompareOperationHint::kInternalizedString> \
- k##Name##InternalizedStringOperator; \
+#define COMPARE_OP(Name, properties) \
+ template <CompareOperationHint kHint> \
+ struct Name##Operator final : public Operator1<CompareOperationHint> { \
+ Name##Operator() \
+ : Operator1<CompareOperationHint>( \
+ IrOpcode::kJS##Name, properties, "JS" #Name, 2, 1, 1, 1, 1, \
+ Operator::ZeroIfNoThrow(properties), kHint) {} \
+ }; \
+ Name##Operator<CompareOperationHint::kNone> k##Name##NoneOperator; \
+ Name##Operator<CompareOperationHint::kSignedSmall> \
+ k##Name##SignedSmallOperator; \
+ Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator; \
+ Name##Operator<CompareOperationHint::kNumberOrOddball> \
+ k##Name##NumberOrOddballOperator; \
+ Name##Operator<CompareOperationHint::kInternalizedString> \
+ k##Name##InternalizedStringOperator; \
+ Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
+ Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
COMPARE_OP_LIST(COMPARE_OP)
#undef COMPARE_OP
@@ -667,6 +710,8 @@ BINARY_OP_LIST(BINARY_OP)
return &cache_.k##Name##InternalizedStringOperator; \
case CompareOperationHint::kString: \
return &cache_.k##Name##StringOperator; \
+ case CompareOperationHint::kReceiver: \
+ return &cache_.k##Name##ReceiverOperator; \
case CompareOperationHint::kAny: \
return &cache_.k##Name##AnyOperator; \
} \
@@ -696,18 +741,37 @@ const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
hints); // parameter
}
-const Operator* JSOperatorBuilder::CallFunction(
- size_t arity, float frequency, VectorSlotPair const& feedback,
- ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
- CallFunctionParameters parameters(arity, frequency, feedback, tail_call_mode,
- convert_mode);
- return new (zone()) Operator1<CallFunctionParameters>( // --
- IrOpcode::kJSCallFunction, Operator::kNoProperties, // opcode
- "JSCallFunction", // name
- parameters.arity(), 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+const Operator* JSOperatorBuilder::CallForwardVarargs(
+ uint32_t start_index, TailCallMode tail_call_mode) {
+ CallForwardVarargsParameters parameters(start_index, tail_call_mode);
+ return new (zone()) Operator1<CallForwardVarargsParameters>( // --
+ IrOpcode::kJSCallForwardVarargs, Operator::kNoProperties, // opcode
+ "JSCallForwardVarargs", // name
+ 2, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
+const Operator* JSOperatorBuilder::Call(size_t arity, float frequency,
+ VectorSlotPair const& feedback,
+ ConvertReceiverMode convert_mode,
+ TailCallMode tail_call_mode) {
+ CallParameters parameters(arity, frequency, feedback, tail_call_mode,
+ convert_mode);
+ return new (zone()) Operator1<CallParameters>( // --
+ IrOpcode::kJSCall, Operator::kNoProperties, // opcode
+ "JSCall", // name
+ parameters.arity(), 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+const Operator* JSOperatorBuilder::CallWithSpread(uint32_t arity) {
+ CallWithSpreadParameters parameters(arity);
+ return new (zone()) Operator1<CallWithSpreadParameters>( // --
+ IrOpcode::kJSCallWithSpread, Operator::kNoProperties, // opcode
+ "JSCallWithSpread", // name
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id) {
const Runtime::Function* f = Runtime::FunctionForId(id);
@@ -733,23 +797,23 @@ const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
parameters); // parameter
}
-const Operator* JSOperatorBuilder::CallConstruct(
- uint32_t arity, float frequency, VectorSlotPair const& feedback) {
- CallConstructParameters parameters(arity, frequency, feedback);
- return new (zone()) Operator1<CallConstructParameters>( // --
- IrOpcode::kJSCallConstruct, Operator::kNoProperties, // opcode
- "JSCallConstruct", // name
- parameters.arity(), 1, 1, 1, 1, 2, // counts
- parameters); // parameter
+const Operator* JSOperatorBuilder::Construct(uint32_t arity, float frequency,
+ VectorSlotPair const& feedback) {
+ ConstructParameters parameters(arity, frequency, feedback);
+ return new (zone()) Operator1<ConstructParameters>( // --
+ IrOpcode::kJSConstruct, Operator::kNoProperties, // opcode
+ "JSConstruct", // name
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
-const Operator* JSOperatorBuilder::CallConstructWithSpread(uint32_t arity) {
- CallConstructWithSpreadParameters parameters(arity);
- return new (zone()) Operator1<CallConstructWithSpreadParameters>( // --
- IrOpcode::kJSCallConstructWithSpread, Operator::kNoProperties, // opcode
- "JSCallConstructWithSpread", // name
- parameters.arity(), 1, 1, 1, 1, 2, // counts
- parameters); // parameter
+const Operator* JSOperatorBuilder::ConstructWithSpread(uint32_t arity) {
+ ConstructWithSpreadParameters parameters(arity);
+ return new (zone()) Operator1<ConstructWithSpreadParameters>( // --
+ IrOpcode::kJSConstructWithSpread, Operator::kNoProperties, // opcode
+ "JSConstructWithSpread", // name
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::ConvertReceiver(
@@ -819,6 +883,15 @@ const Operator* JSOperatorBuilder::StoreProperty(
access); // parameter
}
+const Operator* JSOperatorBuilder::StoreNamedOwn(
+ Handle<Name> name, VectorSlotPair const& feedback) {
+ StoreNamedOwnParameters parameters(name, feedback);
+ return new (zone()) Operator1<StoreNamedOwnParameters>( // --
+ IrOpcode::kJSStoreNamedOwn, Operator::kNoProperties, // opcode
+ "JSStoreNamedOwn", // name
+ 2, 1, 1, 0, 1, 2, // counts
+ parameters); // parameter
+}
const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
return new (zone()) Operator1<LanguageMode>( // --
@@ -938,7 +1011,7 @@ const Operator* JSOperatorBuilder::CreateLiteralArray(
}
const Operator* JSOperatorBuilder::CreateLiteralObject(
- Handle<FixedArray> constant_properties, int literal_flags,
+ Handle<BoilerplateDescription> constant_properties, int literal_flags,
int literal_index, int number_of_properties) {
CreateLiteralParameters parameters(constant_properties, number_of_properties,
literal_flags, literal_index);
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 65f3f12d51..730b4b9551 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -7,35 +7,43 @@
#include "src/base/compiler-specific.h"
#include "src/globals.h"
+#include "src/handles.h"
#include "src/runtime/runtime.h"
#include "src/type-hints.h"
namespace v8 {
namespace internal {
+
+class AllocationSite;
+class BoilerplateDescription;
+class ConstantElementsPair;
+class SharedFunctionInfo;
+class FeedbackVector;
+
namespace compiler {
// Forward declarations.
class Operator;
struct JSOperatorGlobalCache;
-// Defines a pair of {FeedbackVector} and {FeedbackVectorSlot}, which
+// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
// is used to access the type feedback for a certain {Node}.
class V8_EXPORT_PRIVATE VectorSlotPair {
public:
VectorSlotPair();
- VectorSlotPair(Handle<FeedbackVector> vector, FeedbackVectorSlot slot)
+ VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot)
: vector_(vector), slot_(slot) {}
bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
Handle<FeedbackVector> vector() const { return vector_; }
- FeedbackVectorSlot slot() const { return slot_; }
+ FeedbackSlot slot() const { return slot_; }
int index() const;
private:
const Handle<FeedbackVector> vector_;
- const FeedbackVectorSlot slot_;
+ const FeedbackSlot slot_;
};
bool operator==(VectorSlotPair const&, VectorSlotPair const&);
@@ -53,11 +61,11 @@ ToBooleanHints ToBooleanHintsOf(Operator const* op);
// Defines the arity and the feedback for a JavaScript constructor call. This is
-// used as a parameter by JSCallConstruct operators.
-class CallConstructParameters final {
+// used as a parameter by JSConstruct operators.
+class ConstructParameters final {
public:
- CallConstructParameters(uint32_t arity, float frequency,
- VectorSlotPair const& feedback)
+ ConstructParameters(uint32_t arity, float frequency,
+ VectorSlotPair const& feedback)
: arity_(arity), frequency_(frequency), feedback_(feedback) {}
uint32_t arity() const { return arity_; }
@@ -70,21 +78,21 @@ class CallConstructParameters final {
VectorSlotPair const feedback_;
};
-bool operator==(CallConstructParameters const&, CallConstructParameters const&);
-bool operator!=(CallConstructParameters const&, CallConstructParameters const&);
+bool operator==(ConstructParameters const&, ConstructParameters const&);
+bool operator!=(ConstructParameters const&, ConstructParameters const&);
-size_t hash_value(CallConstructParameters const&);
+size_t hash_value(ConstructParameters const&);
-std::ostream& operator<<(std::ostream&, CallConstructParameters const&);
+std::ostream& operator<<(std::ostream&, ConstructParameters const&);
-CallConstructParameters const& CallConstructParametersOf(Operator const*);
+ConstructParameters const& ConstructParametersOf(Operator const*);
// Defines the arity for a JavaScript constructor call with a spread as the last
-// parameters. This is used as a parameter by JSCallConstructWithSpread
+// parameter. This is used as a parameter by JSConstructWithSpread
// operators.
-class CallConstructWithSpreadParameters final {
+class ConstructWithSpreadParameters final {
public:
- explicit CallConstructWithSpreadParameters(uint32_t arity) : arity_(arity) {}
+ explicit ConstructWithSpreadParameters(uint32_t arity) : arity_(arity) {}
uint32_t arity() const { return arity_; }
@@ -92,27 +100,61 @@ class CallConstructWithSpreadParameters final {
uint32_t const arity_;
};
-bool operator==(CallConstructWithSpreadParameters const&,
- CallConstructWithSpreadParameters const&);
-bool operator!=(CallConstructWithSpreadParameters const&,
- CallConstructWithSpreadParameters const&);
+bool operator==(ConstructWithSpreadParameters const&,
+ ConstructWithSpreadParameters const&);
+bool operator!=(ConstructWithSpreadParameters const&,
+ ConstructWithSpreadParameters const&);
-size_t hash_value(CallConstructWithSpreadParameters const&);
+size_t hash_value(ConstructWithSpreadParameters const&);
-std::ostream& operator<<(std::ostream&,
- CallConstructWithSpreadParameters const&);
+std::ostream& operator<<(std::ostream&, ConstructWithSpreadParameters const&);
-CallConstructWithSpreadParameters const& CallConstructWithSpreadParametersOf(
+ConstructWithSpreadParameters const& ConstructWithSpreadParametersOf(
Operator const*);
+// Defines the parameters for a JavaScript call forwarding its varargs.
+// This is used as a parameter by JSCallForwardVarargs operators.
+class CallForwardVarargsParameters final {
+ public:
+ CallForwardVarargsParameters(uint32_t start_index,
+ TailCallMode tail_call_mode)
+ : bit_field_(StartIndexField::encode(start_index) |
+ TailCallModeField::encode(tail_call_mode)) {}
+
+ uint32_t start_index() const { return StartIndexField::decode(bit_field_); }
+ TailCallMode tail_call_mode() const {
+ return TailCallModeField::decode(bit_field_);
+ }
+
+ bool operator==(CallForwardVarargsParameters const& that) const {
+ return this->bit_field_ == that.bit_field_;
+ }
+ bool operator!=(CallForwardVarargsParameters const& that) const {
+ return !(*this == that);
+ }
+
+ private:
+ friend size_t hash_value(CallForwardVarargsParameters const& p) {
+ return p.bit_field_;
+ }
+
+ typedef BitField<uint32_t, 0, 30> StartIndexField;
+ typedef BitField<TailCallMode, 31, 1> TailCallModeField;
+
+ uint32_t const bit_field_;
+};
+
+std::ostream& operator<<(std::ostream&, CallForwardVarargsParameters const&);
+
+CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
+ Operator const*) WARN_UNUSED_RESULT;
+
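JSCallForwardVarargs passes the caller's actual arguments, from start_index onward, straight through to the callee without materializing an arguments array. A hedged sketch of a source pattern that can compile this way (`wrapper` and `target` are hypothetical):

    function wrapper() {
      // Instead of allocating `arguments`, the optimized call forwards
      // the caller's stack arguments directly, starting at start_index.
      return target.apply(this, arguments);
    }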
// Defines the arity and the call flags for a JavaScript function call. This is
-// used as a parameter by JSCallFunction operators.
-class CallFunctionParameters final {
+// used as a parameter by JSCall operators.
+class CallParameters final {
public:
- CallFunctionParameters(size_t arity, float frequency,
- VectorSlotPair const& feedback,
- TailCallMode tail_call_mode,
- ConvertReceiverMode convert_mode)
+ CallParameters(size_t arity, float frequency, VectorSlotPair const& feedback,
+ TailCallMode tail_call_mode, ConvertReceiverMode convert_mode)
: bit_field_(ArityField::encode(arity) |
ConvertReceiverModeField::encode(convert_mode) |
TailCallModeField::encode(tail_call_mode)),
@@ -129,17 +171,15 @@ class CallFunctionParameters final {
}
VectorSlotPair const& feedback() const { return feedback_; }
- bool operator==(CallFunctionParameters const& that) const {
+ bool operator==(CallParameters const& that) const {
return this->bit_field_ == that.bit_field_ &&
this->frequency_ == that.frequency_ &&
this->feedback_ == that.feedback_;
}
- bool operator!=(CallFunctionParameters const& that) const {
- return !(*this == that);
- }
+ bool operator!=(CallParameters const& that) const { return !(*this == that); }
private:
- friend size_t hash_value(CallFunctionParameters const& p) {
+ friend size_t hash_value(CallParameters const& p) {
return base::hash_combine(p.bit_field_, p.frequency_, p.feedback_);
}
@@ -152,12 +192,35 @@ class CallFunctionParameters final {
VectorSlotPair const feedback_;
};
-size_t hash_value(CallFunctionParameters const&);
+size_t hash_value(CallParameters const&);
-std::ostream& operator<<(std::ostream&, CallFunctionParameters const&);
+std::ostream& operator<<(std::ostream&, CallParameters const&);
-const CallFunctionParameters& CallFunctionParametersOf(const Operator* op);
+const CallParameters& CallParametersOf(const Operator* op);
+// Defines the arity for a JavaScript function call with a spread as the
+// last parameter. This is used as a parameter by JSCallWithSpread
+// operators.
+class CallWithSpreadParameters final {
+ public:
+ explicit CallWithSpreadParameters(uint32_t arity) : arity_(arity) {}
+
+ uint32_t arity() const { return arity_; }
+
+ private:
+ uint32_t const arity_;
+};
+
+bool operator==(CallWithSpreadParameters const&,
+ CallWithSpreadParameters const&);
+bool operator!=(CallWithSpreadParameters const&,
+ CallWithSpreadParameters const&);
+
+size_t hash_value(CallWithSpreadParameters const&);
+
+std::ostream& operator<<(std::ostream&, CallWithSpreadParameters const&);
+
+CallWithSpreadParameters const& CallWithSpreadParametersOf(Operator const*);
// Defines the arity and the ID for a runtime function call. This is used as a
// parameter by JSCallRuntime operators.
@@ -267,6 +330,29 @@ std::ostream& operator<<(std::ostream& os,
CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
Operator const*);
+// Defines parameters for JSStoreNamedOwn operator.
+class StoreNamedOwnParameters final {
+ public:
+ StoreNamedOwnParameters(Handle<Name> name, VectorSlotPair const& feedback)
+ : name_(name), feedback_(feedback) {}
+
+ Handle<Name> name() const { return name_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ Handle<Name> const name_;
+ VectorSlotPair const feedback_;
+};
+
+bool operator==(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&);
+bool operator!=(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&);
+
+size_t hash_value(StoreNamedOwnParameters const&);
+
+std::ostream& operator<<(std::ostream&, StoreNamedOwnParameters const&);
+
+const StoreNamedOwnParameters& StoreNamedOwnParametersOf(const Operator* op);
+
// Defines the feedback, i.e., vector and index, for storing a data property in
// an object literal. This is used as a parameter by the
// JSStoreDataPropertyInLiteral operator.
@@ -509,17 +595,17 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LessThanOrEqual(CompareOperationHint hint);
const Operator* GreaterThanOrEqual(CompareOperationHint hint);
- const Operator* BitwiseOr(BinaryOperationHint hint);
- const Operator* BitwiseXor(BinaryOperationHint hint);
- const Operator* BitwiseAnd(BinaryOperationHint hint);
- const Operator* ShiftLeft(BinaryOperationHint hint);
- const Operator* ShiftRight(BinaryOperationHint hint);
- const Operator* ShiftRightLogical(BinaryOperationHint hint);
+ const Operator* BitwiseOr();
+ const Operator* BitwiseXor();
+ const Operator* BitwiseAnd();
+ const Operator* ShiftLeft();
+ const Operator* ShiftRight();
+ const Operator* ShiftRightLogical();
const Operator* Add(BinaryOperationHint hint);
- const Operator* Subtract(BinaryOperationHint hint);
- const Operator* Multiply(BinaryOperationHint hint);
- const Operator* Divide(BinaryOperationHint hint);
- const Operator* Modulus(BinaryOperationHint hint);
+ const Operator* Subtract();
+ const Operator* Multiply();
+ const Operator* Divide();
+ const Operator* Modulus();
const Operator* ToBoolean(ToBooleanHints hints);
const Operator* ToInteger();
@@ -540,23 +626,26 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateLiteralArray(Handle<ConstantElementsPair> constant,
int literal_flags, int literal_index,
int number_of_elements);
- const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
+ const Operator* CreateLiteralObject(Handle<BoilerplateDescription> constant,
int literal_flags, int literal_index,
int number_of_properties);
const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
int literal_flags, int literal_index);
- const Operator* CallFunction(
+ const Operator* CallForwardVarargs(uint32_t start_index,
+ TailCallMode tail_call_mode);
+ const Operator* Call(
size_t arity, float frequency = 0.0f,
VectorSlotPair const& feedback = VectorSlotPair(),
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ const Operator* CallWithSpread(uint32_t arity);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
- const Operator* CallConstruct(uint32_t arity, float frequency,
- VectorSlotPair const& feedback);
- const Operator* CallConstructWithSpread(uint32_t arity);
+ const Operator* Construct(uint32_t arity, float frequency,
+ VectorSlotPair const& feedback);
+ const Operator* ConstructWithSpread(uint32_t arity);
const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
@@ -568,6 +657,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name,
VectorSlotPair const& feedback);
+ const Operator* StoreNamedOwn(Handle<Name> name,
+ VectorSlotPair const& feedback);
const Operator* StoreDataPropertyInLiteral(const VectorSlotPair& feedback);
const Operator* DeleteProperty(LanguageMode language_mode);
@@ -589,6 +680,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LoadModule(int32_t cell_index);
const Operator* StoreModule(int32_t cell_index);
+ const Operator* ClassOf();
const Operator* TypeOf();
const Operator* InstanceOf();
const Operator* OrdinaryHasInstance();
@@ -607,6 +699,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* GeneratorRestoreRegister(int index);
const Operator* StackCheck();
+ const Operator* Debugger();
const Operator* CreateFunctionContext(int slot_count, ScopeType scope_type);
const Operator* CreateCatchContext(const Handle<String>& name,
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
new file mode 100644
index 0000000000..e30e016c79
--- /dev/null
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -0,0 +1,153 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-type-hint-lowering.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/feedback-vector.h"
+#include "src/type-hints.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSSpeculativeBinopBuilder final {
+ public:
+ JSSpeculativeBinopBuilder(JSTypeHintLowering* lowering, const Operator* op,
+ Node* left, Node* right, Node* effect,
+ Node* control, FeedbackSlot slot)
+ : lowering_(lowering),
+ op_(op),
+ left_(left),
+ right_(right),
+ effect_(effect),
+ control_(control),
+ slot_(slot) {}
+
+ BinaryOperationHint GetBinaryOperationHint() {
+ DCHECK_EQ(FeedbackSlotKind::kBinaryOp, feedback_vector()->GetKind(slot_));
+ BinaryOpICNexus nexus(feedback_vector(), slot_);
+ return nexus.GetBinaryOperationFeedback();
+ }
+
+ bool GetBinaryNumberOperationHint(NumberOperationHint* hint) {
+ switch (GetBinaryOperationHint()) {
+ case BinaryOperationHint::kSignedSmall:
+ *hint = NumberOperationHint::kSignedSmall;
+ return true;
+ case BinaryOperationHint::kSigned32:
+ *hint = NumberOperationHint::kSigned32;
+ return true;
+ case BinaryOperationHint::kNumberOrOddball:
+ *hint = NumberOperationHint::kNumberOrOddball;
+ return true;
+ case BinaryOperationHint::kAny:
+ case BinaryOperationHint::kNone:
+ case BinaryOperationHint::kString:
+ break;
+ }
+ return false;
+ }
+
+ const Operator* SpeculativeNumberOp(NumberOperationHint hint) {
+ switch (op_->opcode()) {
+ case IrOpcode::kJSAdd:
+ return simplified()->SpeculativeNumberAdd(hint);
+ case IrOpcode::kJSSubtract:
+ return simplified()->SpeculativeNumberSubtract(hint);
+ case IrOpcode::kJSMultiply:
+ return simplified()->SpeculativeNumberMultiply(hint);
+ case IrOpcode::kJSDivide:
+ return simplified()->SpeculativeNumberDivide(hint);
+ case IrOpcode::kJSModulus:
+ return simplified()->SpeculativeNumberModulus(hint);
+ case IrOpcode::kJSBitwiseAnd:
+ return simplified()->SpeculativeNumberBitwiseAnd(hint);
+ case IrOpcode::kJSBitwiseOr:
+ return simplified()->SpeculativeNumberBitwiseOr(hint);
+ case IrOpcode::kJSBitwiseXor:
+ return simplified()->SpeculativeNumberBitwiseXor(hint);
+ case IrOpcode::kJSShiftLeft:
+ return simplified()->SpeculativeNumberShiftLeft(hint);
+ case IrOpcode::kJSShiftRight:
+ return simplified()->SpeculativeNumberShiftRight(hint);
+ case IrOpcode::kJSShiftRightLogical:
+ return simplified()->SpeculativeNumberShiftRightLogical(hint);
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
+
+ Node* BuildSpeculativeOperator(const Operator* op) {
+ DCHECK_EQ(2, op->ValueInputCount());
+ DCHECK_EQ(1, op->EffectInputCount());
+ DCHECK_EQ(1, op->ControlInputCount());
+ DCHECK_EQ(false, OperatorProperties::HasFrameStateInput(op));
+ DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
+ DCHECK_EQ(1, op->EffectOutputCount());
+ DCHECK_EQ(0, op->ControlOutputCount());
+ return graph()->NewNode(op, left_, right_, effect_, control_);
+ }
+
+ JSGraph* jsgraph() const { return lowering_->jsgraph(); }
+ Graph* graph() const { return jsgraph()->graph(); }
+ JSOperatorBuilder* javascript() { return jsgraph()->javascript(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
+ CommonOperatorBuilder* common() { return jsgraph()->common(); }
+ const Handle<FeedbackVector>& feedback_vector() const {
+ return lowering_->feedback_vector();
+ }
+
+ private:
+ JSTypeHintLowering* lowering_;
+ const Operator* op_;
+ Node* left_;
+ Node* right_;
+ Node* effect_;
+ Node* control_;
+ FeedbackSlot slot_;
+};
+
+JSTypeHintLowering::JSTypeHintLowering(JSGraph* jsgraph,
+ Handle<FeedbackVector> feedback_vector)
+ : jsgraph_(jsgraph), feedback_vector_(feedback_vector) {}
+
+Reduction JSTypeHintLowering::ReduceBinaryOperation(const Operator* op,
+ Node* left, Node* right,
+ Node* effect, Node* control,
+ FeedbackSlot slot) {
+ switch (op->opcode()) {
+ case IrOpcode::kJSBitwiseOr:
+ case IrOpcode::kJSBitwiseXor:
+ case IrOpcode::kJSBitwiseAnd:
+ case IrOpcode::kJSShiftLeft:
+ case IrOpcode::kJSShiftRight:
+ case IrOpcode::kJSShiftRightLogical:
+ case IrOpcode::kJSAdd:
+ case IrOpcode::kJSSubtract:
+ case IrOpcode::kJSMultiply:
+ case IrOpcode::kJSDivide:
+ case IrOpcode::kJSModulus: {
+ JSSpeculativeBinopBuilder b(this, op, left, right, effect, control, slot);
+ NumberOperationHint hint;
+ if (b.GetBinaryNumberOperationHint(&hint)) {
+ Node* node = b.BuildSpeculativeOperator(b.SpeculativeNumberOp(hint));
+ return Reduction(node);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return Reduction();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
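Unlike the reducers that run over a finished graph, this class is meant to be consulted while the graph is still being built. A hedged sketch of how a hypothetical caller (for example, a bytecode graph builder) might use ReduceBinaryOperation; BuildAddSketch and its trimmed-down fallback NewNode call are illustrative, not actual V8 code:

// Consult the type-hint lowering first; fall back to the generic JSAdd
// operator when no speculative reduction applies.
Node* BuildAddSketch(JSTypeHintLowering* lowering, JSGraph* jsgraph,
                     Node* left, Node* right, Node* effect, Node* control,
                     FeedbackSlot slot) {
  const Operator* op = jsgraph->javascript()->Add(BinaryOperationHint::kAny);
  Reduction early =
      lowering->ReduceBinaryOperation(op, left, right, effect, control, slot);
  if (early.Changed()) {
    return early.replacement();  // A speculative simplified node was emitted.
  }
  // No usable feedback: emit the generic JS operator (context and frame-state
  // inputs omitted from this sketch).
  return jsgraph->graph()->NewNode(op, left, right, effect, control);
}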
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
new file mode 100644
index 0000000000..d1dd1a86d6
--- /dev/null
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -0,0 +1,54 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
+#define V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+
+// The type-hint lowering consumes feedback about data operations (i.e. unary
+// and binary operations) to emit nodes using speculative simplified operators
+// in favor of the generic JavaScript operators.
+//
+// This lowering is implemented as an early reduction and can be applied before
+// nodes are placed into the initial graph. It provides the ability to shortcut
+// the JavaScript-level operators and directly emit simplified-level operators
+// even during initial graph building. This is the reason this lowering doesn't
+// follow the interface of the reducer framework used after graph construction.
+class JSTypeHintLowering {
+ public:
+ JSTypeHintLowering(JSGraph* jsgraph, Handle<FeedbackVector> feedback_vector);
+
+ // Potential reduction of binary (arithmetic, logical and shift) operations.
+ Reduction ReduceBinaryOperation(const Operator* op, Node* left, Node* right,
+ Node* effect, Node* control,
+ FeedbackSlot slot);
+
+ private:
+ friend class JSSpeculativeBinopBuilder;
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+ const Handle<FeedbackVector>& feedback_vector() const {
+ return feedback_vector_;
+ }
+
+ JSGraph* jsgraph_;
+ Handle<FeedbackVector> feedback_vector_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSTypeHintLowering);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 54c8713578..31accbd86c 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -16,6 +16,7 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -30,30 +31,6 @@ class JSBinopReduction final {
JSBinopReduction(JSTypedLowering* lowering, Node* node)
: lowering_(lowering), node_(node) {}
- bool GetBinaryNumberOperationHint(NumberOperationHint* hint) {
- if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
- DCHECK_NE(0, node_->op()->ControlOutputCount());
- DCHECK_EQ(1, node_->op()->EffectOutputCount());
- DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node_->op()));
- switch (BinaryOperationHintOf(node_->op())) {
- case BinaryOperationHint::kSignedSmall:
- *hint = NumberOperationHint::kSignedSmall;
- return true;
- case BinaryOperationHint::kSigned32:
- *hint = NumberOperationHint::kSigned32;
- return true;
- case BinaryOperationHint::kNumberOrOddball:
- *hint = NumberOperationHint::kNumberOrOddball;
- return true;
- case BinaryOperationHint::kAny:
- case BinaryOperationHint::kNone:
- case BinaryOperationHint::kString:
- break;
- }
- }
- return false;
- }
-
bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
@@ -70,6 +47,7 @@ class JSBinopReduction final {
case CompareOperationHint::kAny:
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
+ case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
}
@@ -87,11 +65,32 @@ class JSBinopReduction final {
return false;
}
+ bool IsReceiverCompareOperation() {
+ if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ return (CompareOperationHintOf(node_->op()) ==
+ CompareOperationHint::kReceiver) &&
+ BothInputsMaybe(Type::Receiver());
+ }
+ return false;
+ }
+
+ bool IsStringCompareOperation() {
+ if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ return (CompareOperationHintOf(node_->op()) ==
+ CompareOperationHint::kString) &&
+ BothInputsMaybe(Type::String());
+ }
+ return false;
+ }
+
// Check if a string addition will definitely result in creating a ConsString,
// i.e. if the combined length of the resulting string exceeds the ConsString
// minimum length.
bool ShouldCreateConsString() {
DCHECK_EQ(IrOpcode::kJSAdd, node_->opcode());
+ DCHECK(OneInputIs(Type::String()));
if (BothInputsAre(Type::String()) ||
((lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) &&
BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString)) {
@@ -115,6 +114,47 @@ class JSBinopReduction final {
return false;
}
+ // Inserts a CheckReceiver for the left input.
+ void CheckLeftInputToReceiver() {
+ Node* left_input = graph()->NewNode(simplified()->CheckReceiver(), left(),
+ effect(), control());
+ node_->ReplaceInput(0, left_input);
+ update_effect(left_input);
+ }
+
+ // Checks that both inputs are Receiver, and if we don't know
+ // statically that one side is already a Receiver, insert a
+ // CheckReceiver node.
+ void CheckInputsToReceiver() {
+ if (!left_type()->Is(Type::Receiver())) {
+ CheckLeftInputToReceiver();
+ }
+ if (!right_type()->Is(Type::Receiver())) {
+ Node* right_input = graph()->NewNode(simplified()->CheckReceiver(),
+ right(), effect(), control());
+ node_->ReplaceInput(1, right_input);
+ update_effect(right_input);
+ }
+ }
+
+ // Checks that both inputs are String, and if we don't know
+ // statically that one side is already a String, insert a
+ // CheckString node.
+ void CheckInputsToString() {
+ if (!left_type()->Is(Type::String())) {
+ Node* left_input = graph()->NewNode(simplified()->CheckString(), left(),
+ effect(), control());
+ node_->ReplaceInput(0, left_input);
+ update_effect(left_input);
+ }
+ if (!right_type()->Is(Type::String())) {
+ Node* right_input = graph()->NewNode(simplified()->CheckString(), right(),
+ effect(), control());
+ node_->ReplaceInput(1, right_input);
+ update_effect(right_input);
+ }
+ }
+
// Checks that both inputs are InternalizedString, and if we don't know
// statically that one side is already an InternalizedString, insert a
// CheckInternalizedString node.
@@ -308,30 +348,18 @@ class JSBinopReduction final {
return nullptr;
}
- const Operator* SpeculativeNumberOp(NumberOperationHint hint) {
+ const Operator* NumberOpFromSpeculativeNumberOp() {
switch (node_->opcode()) {
- case IrOpcode::kJSAdd:
- return simplified()->SpeculativeNumberAdd(hint);
- case IrOpcode::kJSSubtract:
- return simplified()->SpeculativeNumberSubtract(hint);
- case IrOpcode::kJSMultiply:
- return simplified()->SpeculativeNumberMultiply(hint);
- case IrOpcode::kJSDivide:
- return simplified()->SpeculativeNumberDivide(hint);
- case IrOpcode::kJSModulus:
- return simplified()->SpeculativeNumberModulus(hint);
- case IrOpcode::kJSBitwiseAnd:
- return simplified()->SpeculativeNumberBitwiseAnd(hint);
- case IrOpcode::kJSBitwiseOr:
- return simplified()->SpeculativeNumberBitwiseOr(hint);
- case IrOpcode::kJSBitwiseXor:
- return simplified()->SpeculativeNumberBitwiseXor(hint);
- case IrOpcode::kJSShiftLeft:
- return simplified()->SpeculativeNumberShiftLeft(hint);
- case IrOpcode::kJSShiftRight:
- return simplified()->SpeculativeNumberShiftRight(hint);
- case IrOpcode::kJSShiftRightLogical:
- return simplified()->SpeculativeNumberShiftRightLogical(hint);
+ case IrOpcode::kSpeculativeNumberAdd:
+ return simplified()->NumberAdd();
+ case IrOpcode::kSpeculativeNumberSubtract:
+ return simplified()->NumberSubtract();
+ case IrOpcode::kSpeculativeNumberMultiply:
+ return simplified()->NumberMultiply();
+ case IrOpcode::kSpeculativeNumberDivide:
+ return simplified()->NumberDivide();
+ case IrOpcode::kSpeculativeNumberModulus:
+ return simplified()->NumberModulus();
default:
break;
}
@@ -494,6 +522,13 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
dependencies_(dependencies),
flags_(flags),
jsgraph_(jsgraph),
+ pointer_comparable_type_(Type::Union(
+ Type::Oddball(),
+ Type::Union(
+ Type::SymbolOrReceiver(),
+ Type::HeapConstant(factory()->empty_string(), graph()->zone()),
+ graph()->zone()),
+ graph()->zone())),
type_cache_(TypeCache::Get()) {
for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
double min = kMinInt / (1 << k);
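The new pointer_comparable_type_ gathers exactly the values whose heap representation is canonical (oddballs, symbols, receivers, and the single empty string), so strict equality on them reduces to ReferenceEqual; see the OneInputIs(pointer_comparable_type_) case in ReduceJSStrictEqual below. A plain-C++ model of why address identity suffices for such values (illustrative types, not V8's):

#include <cassert>

struct Oddball { const char* name; };

// One canonical instance per oddball value, mirroring V8's singleton heap
// objects; value equality therefore coincides with address equality.
const Oddball kTrue{"true"};
const Oddball kFalse{"false"};

bool StrictEqualSketch(const Oddball* a, const Oddball* b) {
  return a == b;  // ReferenceEqual is safe because instances are canonical.
}

int main() {
  assert(StrictEqualSketch(&kTrue, &kTrue));
  assert(!StrictEqualSketch(&kTrue, &kFalse));
}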
@@ -502,20 +537,22 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
}
}
-Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
+Reduction JSTypedLowering::ReduceSpeculativeNumberAdd(Node* node) {
JSBinopReduction r(this, node);
- NumberOperationHint hint;
- if (r.GetBinaryNumberOperationHint(&hint)) {
- if (hint == NumberOperationHint::kNumberOrOddball &&
- r.BothInputsAre(Type::PlainPrimitive()) &&
- r.NeitherInputCanBe(Type::StringOrReceiver())) {
- // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
- }
- return r.ChangeToSpeculativeOperator(
- simplified()->SpeculativeNumberAdd(hint), Type::Number());
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ if (hint == NumberOperationHint::kNumberOrOddball &&
+ r.BothInputsAre(Type::PlainPrimitive()) &&
+ r.NeitherInputCanBe(Type::StringOrReceiver())) {
+ // SpeculativeNumberAdd(x:-string, y:-string) =>
+ // NumberAdd(ToNumber(x), ToNumber(y))
+ r.ConvertInputsToNumber();
+ return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
+ return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
+ JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
// JSAdd(x:number, y:number) => NumberAdd(x, y)
r.ConvertInputsToNumber();
@@ -538,13 +575,20 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
} else if (!r.RightInputIs(Type::String())) {
flags = STRING_ADD_CONVERT_RIGHT;
}
+ Operator::Properties properties = node->op()->properties();
+ if (r.NeitherInputCanBe(Type::Receiver())) {
+ // Both sides are already strings, so we know that the
+ // string addition will not cause any observable side
+ // effects; it can still throw, of course.
+ properties = Operator::kNoWrite | Operator::kNoDeopt;
+ }
// JSAdd(x:string, y) => CallStub[StringAdd](x, y)
// JSAdd(x, y:string) => CallStub[StringAdd](x, y)
Callable const callable =
CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, node->op()->properties());
+ CallDescriptor::kNeedsFrameState, properties);
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
@@ -556,16 +600,6 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Reduction JSTypedLowering::ReduceNumberBinop(Node* node) {
JSBinopReduction r(this, node);
- NumberOperationHint hint;
- if (r.GetBinaryNumberOperationHint(&hint)) {
- if (hint == NumberOperationHint::kNumberOrOddball &&
- r.BothInputsAre(Type::NumberOrOddball())) {
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
- }
- return r.ChangeToSpeculativeOperator(r.SpeculativeNumberOp(hint),
- Type::Number());
- }
if (r.BothInputsAre(Type::PlainPrimitive()) ||
!(flags() & kDeoptimizationEnabled)) {
r.ConvertInputsToNumber();
@@ -574,13 +608,20 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceInt32Binop(Node* node) {
+Reduction JSTypedLowering::ReduceSpeculativeNumberBinop(Node* node) {
JSBinopReduction r(this, node);
- NumberOperationHint hint;
- if (r.GetBinaryNumberOperationHint(&hint)) {
- return r.ChangeToSpeculativeOperator(r.SpeculativeNumberOp(hint),
- Type::Signed32());
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ if (hint == NumberOperationHint::kNumberOrOddball &&
+ r.BothInputsAre(Type::NumberOrOddball())) {
+ r.ConvertInputsToNumber();
+ return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp(),
+ Type::Number());
}
+ return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceInt32Binop(Node* node) {
+ JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::PlainPrimitive()) ||
!(flags() & kDeoptimizationEnabled)) {
r.ConvertInputsToNumber();
@@ -592,12 +633,6 @@ Reduction JSTypedLowering::ReduceInt32Binop(Node* node) {
Reduction JSTypedLowering::ReduceUI32Shift(Node* node, Signedness signedness) {
JSBinopReduction r(this, node);
- NumberOperationHint hint;
- if (r.GetBinaryNumberOperationHint(&hint)) {
- return r.ChangeToSpeculativeOperator(
- r.SpeculativeNumberOp(hint),
- signedness == kUnsigned ? Type::Unsigned32() : Type::Signed32());
- }
if (r.BothInputsAre(Type::PlainPrimitive()) ||
!(flags() & kDeoptimizationEnabled)) {
r.ConvertInputsToNumber();
@@ -779,6 +814,10 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
r.ConvertInputsToNumber();
less_than = simplified()->NumberLessThan();
less_than_or_equal = simplified()->NumberLessThanOrEqual();
+ } else if (r.IsStringCompareOperation()) {
+ r.CheckInputsToString();
+ less_than = simplified()->StringLessThan();
+ less_than_or_equal = simplified()->StringLessThanOrEqual();
} else {
return NoChange();
}
@@ -820,61 +859,72 @@ Reduction JSTypedLowering::ReduceJSTypeOf(Node* node) {
return Replace(jsgraph()->Constant(f->string_string()));
} else if (type->Is(Type::Symbol())) {
return Replace(jsgraph()->Constant(f->symbol_string()));
- } else if (type->Is(Type::Union(Type::Undefined(), Type::OtherUndetectable(),
- graph()->zone()))) {
+ } else if (type->Is(Type::OtherUndetectableOrUndefined())) {
return Replace(jsgraph()->Constant(f->undefined_string()));
- } else if (type->Is(Type::Null())) {
+ } else if (type->Is(Type::NonCallableOrNull())) {
return Replace(jsgraph()->Constant(f->object_string()));
} else if (type->Is(Type::Function())) {
return Replace(jsgraph()->Constant(f->function_string()));
} else if (type->IsHeapConstant()) {
return Replace(jsgraph()->Constant(
Object::TypeOf(isolate(), type->AsHeapConstant()->Value())));
- } else if (type->IsOtherNumberConstant()) {
- return Replace(jsgraph()->Constant(f->number_string()));
}
return NoChange();
}
Reduction JSTypedLowering::ReduceJSEqualTypeOf(Node* node, bool invert) {
+ Node* input;
+ Handle<String> type;
HeapObjectBinopMatcher m(node);
if (m.left().IsJSTypeOf() && m.right().HasValue() &&
m.right().Value()->IsString()) {
- Node* replacement;
- Node* input = m.left().InputAt(0);
- Handle<String> value = Handle<String>::cast(m.right().Value());
- if (String::Equals(value, factory()->boolean_string())) {
- replacement =
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
- graph()->NewNode(simplified()->ReferenceEqual(),
- input, jsgraph()->TrueConstant()),
- jsgraph()->TrueConstant(),
- graph()->NewNode(simplified()->ReferenceEqual(),
- input, jsgraph()->FalseConstant()));
- } else if (String::Equals(value, factory()->function_string())) {
- replacement = graph()->NewNode(simplified()->ObjectIsCallable(), input);
- } else if (String::Equals(value, factory()->number_string())) {
- replacement = graph()->NewNode(simplified()->ObjectIsNumber(), input);
- } else if (String::Equals(value, factory()->string_string())) {
- replacement = graph()->NewNode(simplified()->ObjectIsString(), input);
- } else if (String::Equals(value, factory()->undefined_string())) {
- replacement = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged),
- graph()->NewNode(simplified()->ReferenceEqual(), input,
- jsgraph()->NullConstant()),
- jsgraph()->FalseConstant(),
- graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
- } else {
- return NoChange();
- }
- if (invert) {
- replacement = graph()->NewNode(simplified()->BooleanNot(), replacement);
- }
- ReplaceWithValue(node, replacement);
- return Replace(replacement);
+ input = m.left().InputAt(0);
+ type = Handle<String>::cast(m.right().Value());
+ } else if (m.right().IsJSTypeOf() && m.left().HasValue() &&
+ m.left().Value()->IsString()) {
+ input = m.right().InputAt(0);
+ type = Handle<String>::cast(m.left().Value());
+ } else {
+ return NoChange();
}
- return NoChange();
+ Node* value;
+ if (String::Equals(type, factory()->boolean_string())) {
+ value =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ReferenceEqual(), input,
+ jsgraph()->TrueConstant()),
+ jsgraph()->TrueConstant(),
+ graph()->NewNode(simplified()->ReferenceEqual(), input,
+ jsgraph()->FalseConstant()));
+ } else if (String::Equals(type, factory()->function_string())) {
+ value = graph()->NewNode(simplified()->ObjectIsDetectableCallable(), input);
+ } else if (String::Equals(type, factory()->number_string())) {
+ value = graph()->NewNode(simplified()->ObjectIsNumber(), input);
+ } else if (String::Equals(type, factory()->object_string())) {
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ObjectIsNonCallable(), input),
+ jsgraph()->TrueConstant(),
+ graph()->NewNode(simplified()->ReferenceEqual(), input,
+ jsgraph()->NullConstant()));
+ } else if (String::Equals(type, factory()->string_string())) {
+ value = graph()->NewNode(simplified()->ObjectIsString(), input);
+ } else if (String::Equals(type, factory()->undefined_string())) {
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ReferenceEqual(), input,
+ jsgraph()->NullConstant()),
+ jsgraph()->FalseConstant(),
+ graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
+ } else {
+ return NoChange();
+ }
+ if (invert) {
+ value = graph()->NewNode(simplified()->BooleanNot(), value);
+ }
+ ReplaceWithValue(node, value);
+ return Replace(value);
}
Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
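In the rewritten typeof lowering above, the "undefined" arm excludes null before testing ObjectIsUndetectable; in V8 the null oddball is also marked undetectable (so that abstract equality against document.all-style objects behaves), which is why the explicit null check has to come first. A plain-C++ model of that Select (illustrative stand-ins, not V8 types):

#include <cassert>

struct ValueSketch {
  bool is_null;
  bool is_undetectable;  // undefined and undetectable objects (and null,
                         // which is why the explicit null check comes first).
};

// Select(ReferenceEqual(v, null), false, ObjectIsUndetectable(v))
bool TypeofIsUndefinedSketch(const ValueSketch& v) {
  return v.is_null ? false : v.is_undetectable;
}

int main() {
  assert(!TypeofIsUndefinedSketch({/*is_null=*/true, /*is_undetectable=*/true}));
  assert(TypeofIsUndefinedSketch({false, true}));    // undefined
  assert(!TypeofIsUndefinedSketch({false, false}));  // ordinary object
}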
@@ -924,6 +974,12 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
} else if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+ } else if (r.IsReceiverCompareOperation()) {
+ r.CheckInputsToReceiver();
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ } else if (r.IsStringCompareOperation()) {
+ r.CheckInputsToString();
+ return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
}
return NoChange();
}
@@ -938,10 +994,10 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
return Replace(replacement);
}
}
- if (r.OneInputCannotBe(Type::NumberOrSimdOrString())) {
+ if (r.OneInputCannotBe(Type::NumberOrString())) {
// For values with canonical representation (i.e. neither String, nor
- // Simd128Value nor Number) an empty type intersection means the values
- // cannot be strictly equal.
+ // Number) an empty type intersection means the values cannot be strictly
+ // equal.
if (!r.left_type()->Maybe(r.right_type())) {
Node* replacement = jsgraph()->BooleanConstant(invert);
ReplaceWithValue(node, replacement);
@@ -955,7 +1011,7 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
if (r.BothInputsAre(Type::Unique())) {
return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
- if (r.OneInputIs(Type::NonStringUniqueOrHole())) {
+ if (r.OneInputIs(pointer_comparable_type_)) {
return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.IsInternalizedStringCompareOperation()) {
@@ -975,6 +1031,15 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
} else if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+ } else if (r.IsReceiverCompareOperation()) {
+ // For strict equality, it's enough to know that one input is a Receiver,
+ // as a strict equality comparison with a Receiver can only yield true if
+ // both sides refer to the same Receiver.
+ r.CheckLeftInputToReceiver();
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ } else if (r.IsStringCompareOperation()) {
+ r.CheckInputsToString();
+ return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
}
return NoChange();
}
@@ -1013,6 +1078,14 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
node->TrimInputCount(1);
NodeProperties::ChangeOp(node, simplified()->BooleanNot());
return Changed(node);
+ } else if (input_type->Is(Type::String())) {
+ // JSToBoolean(x:string) => BooleanNot(ReferenceEqual(x,""))
+ node->ReplaceInput(0,
+ graph()->NewNode(simplified()->ReferenceEqual(), input,
+ jsgraph()->EmptyStringConstant()));
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
}
return NoChange();
}
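The new string arm above leans on the empty string being a canonical heap object (it is exactly the HeapConstant folded into pointer_comparable_type_ earlier), so a single pointer comparison decides truthiness. Modeled in ordinary C++, with value comparison standing in for ReferenceEqual:

#include <cassert>
#include <string>

// JSToBoolean(x:string) => BooleanNot(ReferenceEqual(x, "")): a string is
// truthy exactly when it is not the empty string.
bool ToBooleanStringSketch(const std::string& s) { return !s.empty(); }

int main() {
  assert(!ToBooleanStringSketch(""));
  assert(ToBooleanStringSketch("0"));  // non-empty strings are truthy in JS
}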
@@ -1654,7 +1727,6 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
Type* receiver_type = NodeProperties::GetType(receiver);
Node* context = NodeProperties::GetContextInput(node);
Type* context_type = NodeProperties::GetType(context);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1701,14 +1773,15 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
Node* efalse = effect;
Node* rfalse;
{
- // Convert {receiver} using the ToObjectStub.
+ // Convert {receiver} using the ToObjectStub. The call does not require a
+ // frame-state in this case, because neither null nor undefined is passed.
Callable callable = CodeFactory::ToObject(isolate());
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, node->op()->properties());
+ CallDescriptor::kNoFlags, node->op()->properties());
rfalse = efalse = graph()->NewNode(
common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- receiver, context, frame_state, efalse);
+ receiver, context, efalse);
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
@@ -1758,14 +1831,15 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
Node* econvert = effect;
Node* rconvert;
{
- // Convert {receiver} using the ToObjectStub.
+ // Convert {receiver} using the ToObjectStub. The call does not require a
+ // frame-state in this case, because neither null nor undefined is passed.
Callable callable = CodeFactory::ToObject(isolate());
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, node->op()->properties());
+ CallDescriptor::kNoFlags, node->op()->properties());
rconvert = econvert = graph()->NewNode(
common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- receiver, context, frame_state, econvert);
+ receiver, context, econvert);
}
// Replace {receiver} with global proxy of {context}.
@@ -1827,7 +1901,7 @@ void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
// The logic contained here is mirrored in Builtins::Generate_Adaptor.
// Keep these in sync.
- const bool is_construct = (node->opcode() == IrOpcode::kJSCallConstruct);
+ const bool is_construct = (node->opcode() == IrOpcode::kJSConstruct);
DCHECK(Builtins::HasCppImplementation(builtin_index));
DCHECK_EQ(0, flags & CallDescriptor::kSupportsTailCalls);
@@ -1887,9 +1961,9 @@ bool NeedsArgumentAdaptorFrame(Handle<SharedFunctionInfo> shared, int arity) {
} // namespace
-Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
- CallConstructParameters const& p = CallConstructParametersOf(node->op());
+Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
+ ConstructParameters const& p = ConstructParametersOf(node->op());
DCHECK_LE(2u, p.arity());
int const arity = static_cast<int>(p.arity() - 2);
Node* target = NodeProperties::GetValueInput(node, 0);
@@ -1962,10 +2036,38 @@ Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallForwardVarargs, node->opcode());
+ CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Type* target_type = NodeProperties::GetType(target);
+
+ // Check if {target} is a JSFunction.
+ if (target_type->Is(Type::Function())) {
+ // Compute flags for the call.
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
+
+ // Patch {node} to an indirect call via CallFunctionForwardVarargs.
+ Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(p.start_index()));
+ NodeProperties::ChangeOp(
+ node,
+ common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 1, flags)));
+ return Changed(node);
+ }
+
+ return NoChange();
+}
-Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
- CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+Reduction JSTypedLowering::ReduceJSCall(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
int const arity = static_cast<int>(p.arity() - 2);
ConvertReceiverMode convert_mode = p.convert_mode();
Node* target = NodeProperties::GetValueInput(node, 0);
@@ -1974,7 +2076,6 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
Type* receiver_type = NodeProperties::GetType(receiver);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Node* frame_state = NodeProperties::FindFrameStateBefore(node);
// Try to infer receiver {convert_mode} from {receiver} type.
if (receiver_type->Is(Type::NullOrUndefined())) {
@@ -2007,7 +2108,7 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
!receiver_type->Is(Type::Receiver())) {
receiver = effect =
graph()->NewNode(javascript()->ConvertReceiver(convert_mode),
- receiver, context, frame_state, effect, control);
+ receiver, context, effect, control);
NodeProperties::ReplaceValueInput(node, receiver, 1);
}
@@ -2074,8 +2175,9 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
// Maybe we did at least learn something about the {receiver}.
if (p.convert_mode() != convert_mode) {
NodeProperties::ChangeOp(
- node, javascript()->CallFunction(p.arity(), p.frequency(), p.feedback(),
- convert_mode, p.tail_call_mode()));
+ node,
+ javascript()->Call(p.arity(), p.frequency(), p.feedback(), convert_mode,
+ p.tail_call_mode()));
return Changed(node);
}
@@ -2094,6 +2196,9 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // We don't support lowering JSForInNext inside try blocks.
+ if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
// We know that the {index} is in Unsigned32 range here, otherwise executing
// the JSForInNext wouldn't be valid. Unfortunately due to OSR and generators
// this is not always reflected in the types, hence we might need to rename
@@ -2323,10 +2428,12 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSStoreModule(node);
case IrOpcode::kJSConvertReceiver:
return ReduceJSConvertReceiver(node);
- case IrOpcode::kJSCallConstruct:
- return ReduceJSCallConstruct(node);
- case IrOpcode::kJSCallFunction:
- return ReduceJSCallFunction(node);
+ case IrOpcode::kJSConstruct:
+ return ReduceJSConstruct(node);
+ case IrOpcode::kJSCallForwardVarargs:
+ return ReduceJSCallForwardVarargs(node);
+ case IrOpcode::kJSCall:
+ return ReduceJSCall(node);
case IrOpcode::kJSForInNext:
return ReduceJSForInNext(node);
case IrOpcode::kJSLoadMessage:
@@ -2339,6 +2446,15 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSGeneratorRestoreContinuation(node);
case IrOpcode::kJSGeneratorRestoreRegister:
return ReduceJSGeneratorRestoreRegister(node);
+ // TODO(mstarzinger): Simplified operations hiding in JS-level reducer
+ // are not fooling anyone. Consider moving this into a separate reducer.
+ case IrOpcode::kSpeculativeNumberAdd:
+ return ReduceSpeculativeNumberAdd(node);
+ case IrOpcode::kSpeculativeNumberSubtract:
+ case IrOpcode::kSpeculativeNumberMultiply:
+ case IrOpcode::kSpeculativeNumberDivide:
+ case IrOpcode::kSpeculativeNumberModulus:
+ return ReduceSpeculativeNumberBinop(node);
default:
break;
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 20f35f1fe1..35195ec09d 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -70,8 +70,9 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSToString(Node* node);
Reduction ReduceJSToObject(Node* node);
Reduction ReduceJSConvertReceiver(Node* node);
- Reduction ReduceJSCallConstruct(Node* node);
- Reduction ReduceJSCallFunction(Node* node);
+ Reduction ReduceJSConstruct(Node* node);
+ Reduction ReduceJSCallForwardVarargs(Node* node);
+ Reduction ReduceJSCall(Node* node);
Reduction ReduceJSForInNext(Node* node);
Reduction ReduceJSLoadMessage(Node* node);
Reduction ReduceJSStoreMessage(Node* node);
@@ -83,6 +84,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceInt32Binop(Node* node);
Reduction ReduceUI32Shift(Node* node, Signedness signedness);
Reduction ReduceCreateConsString(Node* node);
+ Reduction ReduceSpeculativeNumberAdd(Node* node);
+ Reduction ReduceSpeculativeNumberBinop(Node* node);
Factory* factory() const;
Graph* graph() const;
@@ -98,6 +101,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Flags flags_;
JSGraph* jsgraph_;
Type* shifted_int32_ranges_[4];
+ Type* pointer_comparable_type_;
TypeCache const& type_cache_;
};
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
index d7d4f91c94..86d25de327 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/jump-threading.cc
@@ -4,6 +4,7 @@
#include "src/compiler/jump-threading.h"
#include "src/compiler/code-generator-impl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 2458f65867..06f967a6a4 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -12,6 +12,7 @@
#include "src/compiler/node.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -52,8 +53,7 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor& d) {
MachineSignature* CallDescriptor::GetMachineSignature(Zone* zone) const {
size_t param_count = ParameterCount();
size_t return_count = ReturnCount();
- MachineType* types = reinterpret_cast<MachineType*>(
- zone->New(sizeof(MachineType*) * (param_count + return_count)));
+ MachineType* types = zone->NewArray<MachineType>(param_count + return_count);
int current = 0;
for (size_t i = 0; i < return_count; ++i) {
types[current++] = GetReturnType(i);
@@ -142,11 +142,11 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
switch (function) {
// Most runtime functions need a FrameState. A few chosen ones that we know
- // not to call into arbitrary JavaScript, not to throw, and not to
- // deoptimize
- // are whitelisted here and can be called without a FrameState.
+ // not to call into arbitrary JavaScript, not to throw, and not to lazily
+ // deoptimize are whitelisted here and can be called without a FrameState.
case Runtime::kAbort:
case Runtime::kAllocateInTargetSpace:
+ case Runtime::kConvertReceiver:
case Runtime::kCreateIterResultObject:
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
@@ -171,6 +171,7 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
return false;
// Some inline intrinsics are also safe to call without a FrameState.
+ case Runtime::kInlineClassOf:
case Runtime::kInlineCreateIterResultObject:
case Runtime::kInlineFixedArrayGet:
case Runtime::kInlineFixedArraySet:
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 6c2935f7ca..10140e1406 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -835,6 +835,9 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
// Only record the new value if the store doesn't have an implicit truncation.
switch (access.machine_type.representation()) {
case MachineRepresentation::kNone:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kBit:
UNREACHABLE();
break;
@@ -1037,6 +1040,9 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
case MachineRepresentation::kNone:
case MachineRepresentation::kBit:
case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
UNREACHABLE();
break;
case MachineRepresentation::kWord32:
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 55cce265d8..9bade2732a 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -303,9 +303,11 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
Node* initial = phi->InputAt(0);
Node* arith = phi->InputAt(1);
InductionVariable::ArithmeticType arithmeticType;
- if (arith->opcode() == IrOpcode::kJSAdd) {
+ if (arith->opcode() == IrOpcode::kJSAdd ||
+ arith->opcode() == IrOpcode::kSpeculativeNumberAdd) {
arithmeticType = InductionVariable::ArithmeticType::kAddition;
- } else if (arith->opcode() == IrOpcode::kJSSubtract) {
+ } else if (arith->opcode() == IrOpcode::kJSSubtract ||
+ arith->opcode() == IrOpcode::kSpeculativeNumberSubtract) {
arithmeticType = InductionVariable::ArithmeticType::kSubtraction;
} else {
return nullptr;
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index ecabbe0575..2d5fce5f0a 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -208,6 +208,8 @@ class MachineRepresentationInferrer {
case IrOpcode::kTruncateFloat32ToUint32:
case IrOpcode::kBitcastFloat32ToInt32:
case IrOpcode::kInt32x4ExtractLane:
+ case IrOpcode::kInt16x8ExtractLane:
+ case IrOpcode::kInt8x16ExtractLane:
case IrOpcode::kInt32Constant:
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kTruncateFloat64ToWord32:
@@ -352,6 +354,8 @@ class MachineRepresentationChecker {
CheckValueInputForInt64Op(node, 1);
break;
case IrOpcode::kInt32x4ExtractLane:
+ case IrOpcode::kInt16x8ExtractLane:
+ case IrOpcode::kInt8x16ExtractLane:
CheckValueInputRepresentationIs(node, 0,
MachineRepresentation::kSimd128);
break;
@@ -516,6 +520,9 @@ class MachineRepresentationChecker {
}
break;
}
+ case IrOpcode::kThrow:
+ CheckValueInputIsTagged(node, 0);
+ break;
case IrOpcode::kTypedStateValues:
case IrOpcode::kFrameState:
break;
@@ -763,6 +770,9 @@ class MachineRepresentationChecker {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kBit:
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index f7fe19d494..a50f0dcb1b 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -12,6 +12,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -331,7 +332,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat32(m.left().Value() - m.right().Value());
}
- if (m.left().IsMinusZero()) {
+ if (allow_signalling_nan_ && m.left().IsMinusZero()) {
// -0.0 - round_down(-0.0 - R) => round_up(R)
if (machine()->Float32RoundUp().IsSupported() &&
m.right().IsFloat32RoundDown()) {
@@ -378,7 +379,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat64(m.left().Value() - m.right().Value());
}
- if (m.left().IsMinusZero()) {
+ if (allow_signalling_nan_ && m.left().IsMinusZero()) {
// -0.0 - round_down(-0.0 - R) => round_up(R)
if (machine()->Float64RoundUp().IsSupported() &&
m.right().IsFloat64RoundDown()) {
@@ -437,7 +438,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsFoldable()) { // K / K => K
return ReplaceFloat64(m.left().Value() / m.right().Value());
}
- if (m.right().Is(-1)) { // x / -1.0 => -x
+ if (allow_signalling_nan_ && m.right().Is(-1)) { // x / -1.0 => -x
node->RemoveInput(1);
NodeProperties::ChangeOp(node, machine()->Float64Neg());
return Changed(node);
@@ -606,7 +607,13 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeFloat32ToFloat64: {
Float32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(m.Value());
+ if (m.HasValue()) {
+ if (!allow_signalling_nan_ && std::isnan(m.Value())) {
+ // Do some arithmetic to guarantee the value is a quiet NaN.
+ return ReplaceFloat64(m.Value() + m.Value());
+ }
+ return ReplaceFloat64(m.Value());
+ }
break;
}
case IrOpcode::kChangeFloat64ToInt32: {
@@ -655,8 +662,15 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kTruncateFloat64ToFloat32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat32(DoubleToFloat32(m.Value()));
- if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
+ if (m.HasValue()) {
+ if (!allow_signalling_nan_ && std::isnan(m.Value())) {
+ // Do some arithmetic to guarantee the value is a quiet NaN.
+ return ReplaceFloat32(DoubleToFloat32(m.Value() + m.Value()));
+ }
+ return ReplaceFloat32(DoubleToFloat32(m.Value()));
+ }
+ if (allow_signalling_nan_ && m.IsChangeFloat32ToFloat64())
+ return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kRoundFloat64ToInt32: {
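The m.Value() + m.Value() trick in the two hunks above works because IEEE-754 arithmetic on a signalling NaN raises invalid-operation and returns a quiet NaN, so adding the constant to itself canonicalizes it before the conversion is folded. A quick self-contained check (assuming the platform provides signalling NaNs):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  static_assert(std::numeric_limits<float>::has_signaling_NaN,
                "platform assumption for this sketch");
  float snan = std::numeric_limits<float>::signaling_NaN();
  float quieted = snan + snan;  // invalid-operation -> quiet NaN result
  assert(std::isnan(quieted));  // still a NaN, now guaranteed quiet
}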
@@ -1167,8 +1181,9 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
if (m.left().IsWord32Shl()) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() &&
- mleft.right().Value() >= base::bits::CountTrailingZeros32(mask)) {
- // (x << L) & (-1 << K) => x << L iff K >= L
+ (mleft.right().Value() & 0x1f) >=
+ base::bits::CountTrailingZeros32(mask)) {
+ // (x << L) & (-1 << K) => x << L iff L >= K
return Replace(mleft.node());
}
} else if (m.left().IsInt32Add()) {
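The corrected identity (x << L) & (-1 << K) => x << L for L >= K can be sanity-checked with concrete shift counts; the new & 0x1f mirrors the hardware behavior of reducing 32-bit shift counts mod 32 before the comparison. A small standalone check (illustrative, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t x = 0xDEADBEEFu;
  const uint32_t L = 8, K = 4;             // shift counts with L >= K
  const uint32_t mask = 0xFFFFFFFFu << K;  // -1 << K
  // x << L already has its low L bits clear, and the mask only clears the
  // low K <= L bits, so the AND is a no-op.
  assert(((x << L) & mask) == (x << L));
}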
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 80310e1f5a..854c22e15e 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -130,7 +130,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
V(BitcastTaggedToWord, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -221,8 +220,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1) \
- V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1) \
V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
@@ -245,16 +242,12 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1) \
V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1) \
V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1) \
- V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1) \
V(Int32x4Add, Operator::kCommutative, 2, 0, 1) \
V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1) \
V(Int32x4Mul, Operator::kCommutative, 2, 0, 1) \
V(Int32x4Min, Operator::kCommutative, 2, 0, 1) \
V(Int32x4Max, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Int32x4Equal, Operator::kCommutative, 2, 0, 1) \
V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
@@ -264,29 +257,18 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
V(Uint32x4Min, Operator::kCommutative, 2, 0, 1) \
V(Uint32x4Max, Operator::kCommutative, 2, 0, 1) \
- V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
- V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1) \
- V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1) \
V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
- V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
- V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1) \
- V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1) \
- V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1) \
V(Int16x8Add, Operator::kCommutative, 2, 0, 1) \
V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
@@ -295,43 +277,27 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Int16x8Mul, Operator::kCommutative, 2, 0, 1) \
V(Int16x8Min, Operator::kCommutative, 2, 0, 1) \
V(Int16x8Max, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Int16x8Equal, Operator::kCommutative, 2, 0, 1) \
V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8Select, Operator::kNoProperties, 3, 0, 1) \
- V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1) \
- V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1) \
V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8Min, Operator::kCommutative, 2, 0, 1) \
V(Uint16x8Max, Operator::kCommutative, 2, 0, 1) \
- V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1) \
- V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1) \
V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1) \
- V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1) \
- V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1) \
- V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1) \
- V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1) \
V(Int8x16Add, Operator::kCommutative, 2, 0, 1) \
V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
@@ -340,40 +306,26 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Int8x16Mul, Operator::kCommutative, 2, 0, 1) \
V(Int8x16Min, Operator::kCommutative, 2, 0, 1) \
V(Int8x16Max, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Int8x16Equal, Operator::kCommutative, 2, 0, 1) \
V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16Select, Operator::kNoProperties, 3, 0, 1) \
- V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1) \
- V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1) \
V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16Min, Operator::kCommutative, 2, 0, 1) \
V(Uint8x16Max, Operator::kCommutative, 2, 0, 1) \
- V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1) \
- V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1) \
V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1) \
- V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1) \
- V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1) \
- V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
V(Simd128Load, Operator::kNoProperties, 2, 0, 1) \
V(Simd128Load1, Operator::kNoProperties, 2, 0, 1) \
V(Simd128Load2, Operator::kNoProperties, 2, 0, 1) \
@@ -387,8 +339,8 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Simd128Not, Operator::kNoProperties, 1, 0, 1) \
V(Simd32x4Select, Operator::kNoProperties, 3, 0, 1) \
- V(Simd32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
- V(Simd32x4Shuffle, Operator::kNoProperties, 6, 0, 1)
+ V(Simd16x8Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd8x16Select, Operator::kNoProperties, 3, 0, 1)
#define PURE_OPTIONAL_OP_LIST(V) \
V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
@@ -458,6 +410,17 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(kWord16) \
V(kWord32)
+#define SIMD_LANE_OP_LIST(V) \
+ V(Float32x4, 4) \
+ V(Int32x4, 4) \
+ V(Int16x8, 8) \
+ V(Int8x16, 16)
+
+#define SIMD_FORMAT_LIST(V) \
+ V(32x4, 32) \
+ V(16x8, 16) \
+ V(8x16, 8)
+
#define STACK_SLOT_CACHED_SIZES_LIST(V) V(4) V(8) V(16)
struct StackSlotOperator : public Operator1<int> {
@@ -634,6 +597,19 @@ struct MachineOperatorGlobalCache {
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef STORE
+ // The {BitcastWordToTagged} operator must not be marked as pure (especially
+ // not idempotent), because otherwise the splitting logic in the Scheduler
+ // might decide to split these operators, thus potentially creating live
+ // ranges of allocation top across calls or other things that might allocate.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=6059 for more details.
+ struct BitcastWordToTaggedOperator : public Operator {
+ BitcastWordToTaggedOperator()
+ : Operator(IrOpcode::kBitcastWordToTagged,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastWordToTagged", 1, 0, 0, 1, 0, 0) {}
+ };
+ BitcastWordToTaggedOperator kBitcastWordToTagged;
+
struct DebugBreakOperator : public Operator {
DebugBreakOperator()
: Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0,
@@ -691,6 +667,9 @@ const Operator* MachineOperatorBuilder::UnalignedStore(
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
@@ -774,6 +753,9 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
@@ -791,6 +773,9 @@ const Operator* MachineOperatorBuilder::ProtectedStore(
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
@@ -802,6 +787,10 @@ const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
return &cache_.kUnsafePointerAdd;
}
+const Operator* MachineOperatorBuilder::BitcastWordToTagged() {
+ return &cache_.kBitcastWordToTagged;
+}
+
const Operator* MachineOperatorBuilder::DebugBreak() {
return &cache_.kDebugBreak;
}
@@ -832,6 +821,9 @@ const Operator* MachineOperatorBuilder::CheckedStore(
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
@@ -861,6 +853,60 @@ const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
return nullptr;
}
+#define SIMD_LANE_OPS(Type, lane_count) \
+ const Operator* MachineOperatorBuilder::Type##ExtractLane( \
+ int32_t lane_index) { \
+ DCHECK(0 <= lane_index && lane_index < lane_count); \
+ return new (zone_) \
+ Operator1<int32_t>(IrOpcode::k##Type##ExtractLane, Operator::kPure, \
+ "Extract lane", 1, 0, 0, 1, 0, 0, lane_index); \
+ } \
+ const Operator* MachineOperatorBuilder::Type##ReplaceLane( \
+ int32_t lane_index) { \
+ DCHECK(0 <= lane_index && lane_index < lane_count); \
+ return new (zone_) \
+ Operator1<int32_t>(IrOpcode::k##Type##ReplaceLane, Operator::kPure, \
+ "Replace lane", 2, 0, 0, 1, 0, 0, lane_index); \
+ }
+SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
+#undef SIMD_LANE_OPS
+
+#define SIMD_SHIFT_OPS(format, bits) \
+ const Operator* MachineOperatorBuilder::Int##format##ShiftLeftByScalar( \
+ int32_t shift) { \
+ DCHECK(0 <= shift && shift < bits); \
+ return new (zone_) Operator1<int32_t>( \
+ IrOpcode::kInt##format##ShiftLeftByScalar, Operator::kPure, \
+ "Shift left", 1, 0, 0, 1, 0, 0, shift); \
+ } \
+ const Operator* MachineOperatorBuilder::Int##format##ShiftRightByScalar( \
+ int32_t shift) { \
+ DCHECK(0 < shift && shift <= bits); \
+ return new (zone_) Operator1<int32_t>( \
+ IrOpcode::kInt##format##ShiftRightByScalar, Operator::kPure, \
+ "Arithmetic shift right", 1, 0, 0, 1, 0, 0, shift); \
+ } \
+ const Operator* MachineOperatorBuilder::Uint##format##ShiftRightByScalar( \
+ int32_t shift) { \
+ DCHECK(0 <= shift && shift < bits); \
+ return new (zone_) Operator1<int32_t>( \
+ IrOpcode::kUint##format##ShiftRightByScalar, Operator::kPure, \
+ "Shift right", 1, 0, 0, 1, 0, 0, shift); \
+ }
+SIMD_FORMAT_LIST(SIMD_SHIFT_OPS)
+#undef SIMD_SHIFT_OPS
+
+// TODO(bbudge) Add Shuffle, DCHECKs based on format.
+#define SIMD_PERMUTE_OPS(format, bits) \
+ const Operator* MachineOperatorBuilder::Simd##format##Swizzle( \
+ uint32_t swizzle) { \
+ return new (zone_) \
+ Operator1<uint32_t>(IrOpcode::kSimd##format##Swizzle, Operator::kPure, \
+ "Swizzle", 2, 0, 0, 1, 0, 0, swizzle); \
+ }
+SIMD_FORMAT_LIST(SIMD_PERMUTE_OPS)
+#undef SIMD_PERMUTE_OPS
+
} // namespace compiler
} // namespace internal
} // namespace v8
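
The lane, shift, and swizzle builders above now mint a fresh Operator1<int32_t> (or Operator1<uint32_t>) per call, carrying the lane index, shift amount, or swizzle selector as an operator parameter rather than as a value input, with the DCHECKs bounds-checking the immediate. A minimal construction-side sketch, assuming a MachineOperatorBuilder named {machine} is in scope:

    // Extract lane 2 of an Int32x4 vector; SIMD_LANE_OPS rejects indices
    // outside [0, 4) for this type.
    const Operator* extract = machine.Int32x4ExtractLane(2);

    // Shift an Int16x8 vector left by 3 bits; the amount is baked into the
    // operator, so the resulting node keeps a single value input.
    const Operator* shl = machine.Int16x8ShiftLeftByScalar(3);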
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index d226879521..0558279183 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -426,8 +426,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// SIMD operators.
const Operator* CreateFloat32x4();
- const Operator* Float32x4ExtractLane();
- const Operator* Float32x4ReplaceLane();
+ const Operator* Float32x4ExtractLane(int32_t);
+ const Operator* Float32x4ReplaceLane(int32_t);
const Operator* Float32x4Abs();
const Operator* Float32x4Neg();
const Operator* Float32x4Sqrt();
@@ -451,16 +451,16 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Float32x4FromUint32x4();
const Operator* CreateInt32x4();
- const Operator* Int32x4ExtractLane();
- const Operator* Int32x4ReplaceLane();
+ const Operator* Int32x4ExtractLane(int32_t);
+ const Operator* Int32x4ReplaceLane(int32_t);
const Operator* Int32x4Neg();
const Operator* Int32x4Add();
const Operator* Int32x4Sub();
const Operator* Int32x4Mul();
const Operator* Int32x4Min();
const Operator* Int32x4Max();
- const Operator* Int32x4ShiftLeftByScalar();
- const Operator* Int32x4ShiftRightByScalar();
+ const Operator* Int32x4ShiftLeftByScalar(int32_t);
+ const Operator* Int32x4ShiftRightByScalar(int32_t);
const Operator* Int32x4Equal();
const Operator* Int32x4NotEqual();
const Operator* Int32x4LessThan();
@@ -471,31 +471,23 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Uint32x4Min();
const Operator* Uint32x4Max();
- const Operator* Uint32x4ShiftLeftByScalar();
- const Operator* Uint32x4ShiftRightByScalar();
+ const Operator* Uint32x4ShiftRightByScalar(int32_t);
const Operator* Uint32x4LessThan();
const Operator* Uint32x4LessThanOrEqual();
const Operator* Uint32x4GreaterThan();
const Operator* Uint32x4GreaterThanOrEqual();
const Operator* Uint32x4FromFloat32x4();
- const Operator* CreateBool32x4();
- const Operator* Bool32x4ExtractLane();
- const Operator* Bool32x4ReplaceLane();
const Operator* Bool32x4And();
const Operator* Bool32x4Or();
const Operator* Bool32x4Xor();
const Operator* Bool32x4Not();
const Operator* Bool32x4AnyTrue();
const Operator* Bool32x4AllTrue();
- const Operator* Bool32x4Swizzle();
- const Operator* Bool32x4Shuffle();
- const Operator* Bool32x4Equal();
- const Operator* Bool32x4NotEqual();
const Operator* CreateInt16x8();
- const Operator* Int16x8ExtractLane();
- const Operator* Int16x8ReplaceLane();
+ const Operator* Int16x8ExtractLane(int32_t);
+ const Operator* Int16x8ReplaceLane(int32_t);
const Operator* Int16x8Neg();
const Operator* Int16x8Add();
const Operator* Int16x8AddSaturate();
@@ -504,46 +496,35 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Int16x8Mul();
const Operator* Int16x8Min();
const Operator* Int16x8Max();
- const Operator* Int16x8ShiftLeftByScalar();
- const Operator* Int16x8ShiftRightByScalar();
+ const Operator* Int16x8ShiftLeftByScalar(int32_t);
+ const Operator* Int16x8ShiftRightByScalar(int32_t);
const Operator* Int16x8Equal();
const Operator* Int16x8NotEqual();
const Operator* Int16x8LessThan();
const Operator* Int16x8LessThanOrEqual();
const Operator* Int16x8GreaterThan();
const Operator* Int16x8GreaterThanOrEqual();
- const Operator* Int16x8Select();
- const Operator* Int16x8Swizzle();
- const Operator* Int16x8Shuffle();
const Operator* Uint16x8AddSaturate();
const Operator* Uint16x8SubSaturate();
const Operator* Uint16x8Min();
const Operator* Uint16x8Max();
- const Operator* Uint16x8ShiftLeftByScalar();
- const Operator* Uint16x8ShiftRightByScalar();
+ const Operator* Uint16x8ShiftRightByScalar(int32_t);
const Operator* Uint16x8LessThan();
const Operator* Uint16x8LessThanOrEqual();
const Operator* Uint16x8GreaterThan();
const Operator* Uint16x8GreaterThanOrEqual();
- const Operator* CreateBool16x8();
- const Operator* Bool16x8ExtractLane();
- const Operator* Bool16x8ReplaceLane();
const Operator* Bool16x8And();
const Operator* Bool16x8Or();
const Operator* Bool16x8Xor();
const Operator* Bool16x8Not();
const Operator* Bool16x8AnyTrue();
const Operator* Bool16x8AllTrue();
- const Operator* Bool16x8Swizzle();
- const Operator* Bool16x8Shuffle();
- const Operator* Bool16x8Equal();
- const Operator* Bool16x8NotEqual();
const Operator* CreateInt8x16();
- const Operator* Int8x16ExtractLane();
- const Operator* Int8x16ReplaceLane();
+ const Operator* Int8x16ExtractLane(int32_t);
+ const Operator* Int8x16ReplaceLane(int32_t);
const Operator* Int8x16Neg();
const Operator* Int8x16Add();
const Operator* Int8x16AddSaturate();
@@ -552,42 +533,31 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Int8x16Mul();
const Operator* Int8x16Min();
const Operator* Int8x16Max();
- const Operator* Int8x16ShiftLeftByScalar();
- const Operator* Int8x16ShiftRightByScalar();
+ const Operator* Int8x16ShiftLeftByScalar(int32_t);
+ const Operator* Int8x16ShiftRightByScalar(int32_t);
const Operator* Int8x16Equal();
const Operator* Int8x16NotEqual();
const Operator* Int8x16LessThan();
const Operator* Int8x16LessThanOrEqual();
const Operator* Int8x16GreaterThan();
const Operator* Int8x16GreaterThanOrEqual();
- const Operator* Int8x16Select();
- const Operator* Int8x16Swizzle();
- const Operator* Int8x16Shuffle();
const Operator* Uint8x16AddSaturate();
const Operator* Uint8x16SubSaturate();
const Operator* Uint8x16Min();
const Operator* Uint8x16Max();
- const Operator* Uint8x16ShiftLeftByScalar();
- const Operator* Uint8x16ShiftRightByScalar();
+ const Operator* Uint8x16ShiftRightByScalar(int32_t);
const Operator* Uint8x16LessThan();
const Operator* Uint8x16LessThanOrEqual();
const Operator* Uint8x16GreaterThan();
const Operator* Uint8x16GreaterThanOrEqual();
- const Operator* CreateBool8x16();
- const Operator* Bool8x16ExtractLane();
- const Operator* Bool8x16ReplaceLane();
const Operator* Bool8x16And();
const Operator* Bool8x16Or();
const Operator* Bool8x16Xor();
const Operator* Bool8x16Not();
const Operator* Bool8x16AnyTrue();
const Operator* Bool8x16AllTrue();
- const Operator* Bool8x16Swizzle();
- const Operator* Bool8x16Shuffle();
- const Operator* Bool8x16Equal();
- const Operator* Bool8x16NotEqual();
const Operator* Simd128Load();
const Operator* Simd128Load1();
@@ -602,8 +572,14 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Simd128Xor();
const Operator* Simd128Not();
const Operator* Simd32x4Select();
- const Operator* Simd32x4Swizzle();
+ const Operator* Simd32x4Swizzle(uint32_t);
const Operator* Simd32x4Shuffle();
+ const Operator* Simd16x8Select();
+ const Operator* Simd16x8Swizzle(uint32_t);
+ const Operator* Simd16x8Shuffle();
+ const Operator* Simd8x16Select();
+ const Operator* Simd8x16Swizzle(uint32_t);
+ const Operator* Simd8x16Shuffle();
// load [base + index]
const Operator* Load(LoadRepresentation rep);
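
On the consuming side, the immediate travels with the operator, so a backend reads it back from the node's operator rather than from a constant input. A sketch, assuming V8's OpParameter accessor for Operator1-based operators (surrounding names illustrative):

    // In an instruction selector visiting, e.g., an Int32x4ExtractLane node:
    int32_t lane = OpParameter<int32_t>(node->op());
    // ... emit {lane} as an immediate operand instead of g.UseRegister(...).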
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 60f634254c..db4b5293a4 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -562,7 +562,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
// Check if current frame is an arguments adaptor frame.
__ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Branch(&done, ne, scratch1,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Load arguments count from current arguments adaptor frame (note, it
// does not include receiver).
@@ -732,10 +732,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- Deoptimizer::BailoutType bailout_type =
- Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result = AssembleDeoptimizerCall(
- deopt_state_id, bailout_type, current_source_position_);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1750,8 +1748,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
void Generate() final {
MipsOperandConverter i(gen_, instr_);
- Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
- i.InputInt32(instr_->InputCount() - 1));
+ Builtins::Name trap_id =
+ static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
bool old_has_frame = __ has_frame();
if (frame_elided_) {
__ set_has_frame(true);
@@ -1761,14 +1759,11 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
if (frame_elided_) {
__ set_has_frame(old_has_frame);
}
- if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
- }
}
private:
- void GenerateCallToTrap(Runtime::FunctionId trap_id) {
- if (trap_id == Runtime::kNumFunctions) {
+ void GenerateCallToTrap(Builtins::Name trap_id) {
+ if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
@@ -1777,15 +1772,20 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
} else {
- __ Move(cp, isolate()->native_context());
gen_->AssembleSourcePosition(instr_);
- __ CallRuntime(trap_id);
+ __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ RelocInfo::CODE_TARGET);
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
}
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
}
bool frame_elided_;
@@ -1986,13 +1986,16 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type,
- SourcePosition pos) {
+ int deoptimization_id, SourcePosition pos) {
+ DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ Deoptimizer::BailoutType bailout_type =
+ deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+ : Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
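
With the bailout type no longer encoded in the instruction's MiscField, AssembleDeoptimizerCall derives it from the DeoptimizeKind recorded for the deopt id. The mapping used above, restated as a standalone sketch (hypothetical helper name):

    Deoptimizer::BailoutType BailoutTypeFor(DeoptimizeKind kind) {
      // Only DeoptimizeKind::kSoft produces a soft bailout; every other
      // kind that reaches this path bails out eagerly.
      return kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
                                           : Deoptimizer::EAGER;
    }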
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 3dcf708349..d0ceac12b9 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -173,10 +173,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
&inputs[1])) {
inputs[0] = g.UseRegister(m.left().node());
input_count++;
- }
- if (has_reverse_opcode &&
- TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
- &input_count, &inputs[1])) {
+ } else if (has_reverse_opcode &&
+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+ &input_count, &inputs[1])) {
inputs[0] = g.UseRegister(m.right().node());
opcode = reverse_opcode;
input_count++;
@@ -212,7 +211,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -265,6 +264,9 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -350,6 +352,9 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -400,9 +405,13 @@ void InstructionSelector::VisitWord32And(Node* node) {
// zeros.
if (lsb + mask_width > 32) mask_width = 32 - lsb;
- Emit(kMipsExt, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
- g.TempImmediate(mask_width));
+ if (lsb == 0 && mask_width == 32) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+ } else {
+ Emit(kMipsExt, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ }
return;
}
// Other cases fall through to the normal And operation.
@@ -1195,6 +1204,9 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1245,6 +1257,9 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1293,6 +1308,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1368,8 +1386,8 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1581,7 +1599,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
- g.TempImmediate(0), cont->reason(),
+ g.TempImmediate(0), cont->kind(), cont->reason(),
cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
@@ -1602,14 +1620,16 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
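
The VisitWord32And change above guards the ext-based pattern against its degenerate case: when the matched shift amount is 0 and the mask covers all 32 bits, the bit-field extract would be the identity. A worked instance (values illustrative):

    uint32_t mask = 0xFFFFFFFF;                                 // And mask
    unsigned mask_width = base::bits::CountPopulation32(mask);  // == 32
    unsigned lsb = 0;                                           // matched shift
    // lsb == 0 && mask_width == 32: kMipsExt would copy bits [0, 32) of the
    // input unchanged, so the selector emits kArchNop instead.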
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index ba921e265b..3ab85e03b7 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -599,7 +599,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
// Check if current frame is an arguments adaptor frame.
__ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Branch(&done, ne, scratch3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Load arguments count from current arguments adaptor frame (note, it
// does not include receiver).
@@ -768,10 +768,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- Deoptimizer::BailoutType bailout_type =
- Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result = AssembleDeoptimizerCall(
- deopt_state_id, bailout_type, current_source_position_);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -2085,8 +2083,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_(gen) {}
void Generate() final {
MipsOperandConverter i(gen_, instr_);
- Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
- i.InputInt32(instr_->InputCount() - 1));
+ Builtins::Name trap_id =
+ static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
bool old_has_frame = __ has_frame();
if (frame_elided_) {
__ set_has_frame(true);
@@ -2096,14 +2094,11 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
if (frame_elided_) {
__ set_has_frame(old_has_frame);
}
- if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
- }
}
private:
- void GenerateCallToTrap(Runtime::FunctionId trap_id) {
- if (trap_id == Runtime::kNumFunctions) {
+ void GenerateCallToTrap(Builtins::Name trap_id) {
+ if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
@@ -2112,15 +2107,20 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
} else {
- __ Move(cp, isolate()->native_context());
gen_->AssembleSourcePosition(instr_);
- __ CallRuntime(trap_id);
+ __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ RelocInfo::CODE_TARGET);
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
}
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
}
bool frame_elided_;
Instruction* instr_;
@@ -2331,13 +2331,16 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type,
- SourcePosition pos) {
+ int deoptimization_id, SourcePosition pos) {
+ DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ Deoptimizer::BailoutType bailout_type =
+ deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+ : Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index d48007b858..4f19a17a30 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -269,10 +269,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
&inputs[1])) {
inputs[0] = g.UseRegister(m.left().node());
input_count++;
- }
- if (has_reverse_opcode &&
- TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
- &input_count, &inputs[1])) {
+ } else if (has_reverse_opcode &&
+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+ &input_count, &inputs[1])) {
inputs[0] = g.UseRegister(m.right().node());
opcode = reverse_opcode;
input_count++;
@@ -308,7 +307,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -380,6 +379,9 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kMips64Ld;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -457,6 +459,9 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kMips64Sd;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -557,9 +562,13 @@ void InstructionSelector::VisitWord64And(Node* node) {
// zeros.
if (lsb + mask_width > 64) mask_width = 64 - lsb;
- Emit(kMips64Dext, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
- g.TempImmediate(static_cast<int32_t>(mask_width)));
+ if (lsb == 0 && mask_width == 64) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+ } else {
+ Emit(kMips64Dext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
+ }
return;
}
// Other cases fall through to the normal And operation.
@@ -1740,6 +1749,9 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = kMips64Uld;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1790,6 +1802,9 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
opcode = kMips64Usd;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1840,6 +1855,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1901,6 +1919,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1942,8 +1963,8 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -2159,7 +2180,7 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
- g.TempImmediate(0), cont->reason(),
+ g.TempImmediate(0), cont->kind(), cont->reason(),
cont->frame_state());
} else if (cont->IsTrap()) {
selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
@@ -2298,14 +2319,16 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index c317fdd5e7..d2bdb8bff5 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -489,13 +489,14 @@ struct BaseWithIndexAndDisplacementMatcher {
bool power_of_two_plus_one = false;
DisplacementMode displacement_mode = kPositiveDisplacement;
int scale = 0;
- if (m.HasIndexInput() && left->OwnedBy(node)) {
+ if (m.HasIndexInput() && left->OwnedByAddressingOperand()) {
index = m.IndexInput();
scale = m.scale();
scale_expression = left;
power_of_two_plus_one = m.power_of_two_plus_one();
bool match_found = false;
- if (right->opcode() == AddMatcher::kSubOpcode && right->OwnedBy(node)) {
+ if (right->opcode() == AddMatcher::kSubOpcode &&
+ right->OwnedByAddressingOperand()) {
AddMatcher right_matcher(right);
if (right_matcher.right().HasValue()) {
// (S + (B - D))
@@ -506,7 +507,8 @@ struct BaseWithIndexAndDisplacementMatcher {
}
}
if (!match_found) {
- if (right->opcode() == AddMatcher::kAddOpcode && right->OwnedBy(node)) {
+ if (right->opcode() == AddMatcher::kAddOpcode &&
+ right->OwnedByAddressingOperand()) {
AddMatcher right_matcher(right);
if (right_matcher.right().HasValue()) {
// (S + (B + D))
@@ -526,7 +528,8 @@ struct BaseWithIndexAndDisplacementMatcher {
}
} else {
bool match_found = false;
- if (left->opcode() == AddMatcher::kSubOpcode && left->OwnedBy(node)) {
+ if (left->opcode() == AddMatcher::kSubOpcode &&
+ left->OwnedByAddressingOperand()) {
AddMatcher left_matcher(left);
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
@@ -551,7 +554,8 @@ struct BaseWithIndexAndDisplacementMatcher {
}
}
if (!match_found) {
- if (left->opcode() == AddMatcher::kAddOpcode && left->OwnedBy(node)) {
+ if (left->opcode() == AddMatcher::kAddOpcode &&
+ left->OwnedByAddressingOperand()) {
AddMatcher left_matcher(left);
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
@@ -565,13 +569,19 @@ struct BaseWithIndexAndDisplacementMatcher {
displacement = left_right;
base = right;
} else if (m.right().HasValue()) {
- // ((S + B) + D)
- index = left_matcher.IndexInput();
- scale = left_matcher.scale();
- scale_expression = left_left;
- power_of_two_plus_one = left_matcher.power_of_two_plus_one();
- base = left_right;
- displacement = right;
+ if (left->OwnedBy(node)) {
+ // ((S + B) + D)
+ index = left_matcher.IndexInput();
+ scale = left_matcher.scale();
+ scale_expression = left_left;
+ power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+ base = left_right;
+ displacement = right;
+ } else {
+ // (B + D)
+ base = left;
+ displacement = right;
+ }
} else {
// (B + B)
index = left;
@@ -584,10 +594,16 @@ struct BaseWithIndexAndDisplacementMatcher {
displacement = left_right;
base = right;
} else if (m.right().HasValue()) {
- // ((B + B) + D)
- index = left_left;
- base = left_right;
- displacement = right;
+ if (left->OwnedBy(node)) {
+ // ((B + B) + D)
+ index = left_left;
+ base = left_right;
+ displacement = right;
+ } else {
+ // (B + D)
+ base = left;
+ displacement = right;
+ }
} else {
// (B + B)
index = left;
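
Previously every folded subtree had to be OwnedBy(node), so an address computation shared by several memory operations was never matched; with OwnedByAddressingOperand such sharing is allowed, and only the ((S + B) + D) and ((B + B) + D) shapes fall back to a plain (B + D) match when {left} has other users. A minimal graph sketch, assuming conventional machine-graph node shapes and in-scope {base}, {displacement}, {zero}, {effect}, {control}:

    // One address computation feeding two loads: neither load owns {add}
    // exclusively, but every use of {add} is an addressing operand, so both
    // loads may still fold it into their address operand.
    Node* add = graph->NewNode(machine.Int32Add(), base, displacement);
    Node* load1 = graph->NewNode(machine.Load(MachineType::Int32()), add,
                                 zero, effect, control);
    Node* load2 = graph->NewNode(machine.Load(MachineType::Int32()), add,
                                 zero, load1, control);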
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index cc3a07d7e3..9243a08583 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -7,7 +7,9 @@
#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/verifier.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
@@ -312,6 +314,111 @@ void NodeProperties::CollectControlProjections(Node* node, Node** projections,
#endif
}
+// static
+bool NodeProperties::IsSame(Node* a, Node* b) {
+ for (;;) {
+ if (a->opcode() == IrOpcode::kCheckHeapObject) {
+ a = GetValueInput(a, 0);
+ continue;
+ }
+ if (b->opcode() == IrOpcode::kCheckHeapObject) {
+ b = GetValueInput(b, 0);
+ continue;
+ }
+ return a == b;
+ }
+}
+
+// static
+NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
+ Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return) {
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ Handle<Map> receiver_map(m.Value()->map());
+ if (receiver_map->is_stable()) {
+ // The {receiver_map} is only reliable when we install a stability
+ // code dependency.
+ *maps_return = ZoneHandleSet<Map>(receiver_map);
+ return kUnreliableReceiverMaps;
+ }
+ }
+ InferReceiverMapsResult result = kReliableReceiverMaps;
+ while (true) {
+ switch (effect->opcode()) {
+ case IrOpcode::kCheckMaps: {
+ Node* const object = GetValueInput(effect, 0);
+ if (IsSame(receiver, object)) {
+ *maps_return = CheckMapsParametersOf(effect->op()).maps();
+ return result;
+ }
+ break;
+ }
+ case IrOpcode::kJSCreate: {
+ if (IsSame(receiver, effect)) {
+ HeapObjectMatcher mtarget(GetValueInput(effect, 0));
+ HeapObjectMatcher mnewtarget(GetValueInput(effect, 1));
+ if (mtarget.HasValue() && mnewtarget.HasValue()) {
+ Handle<JSFunction> original_constructor =
+ Handle<JSFunction>::cast(mnewtarget.Value());
+ if (original_constructor->has_initial_map()) {
+ Handle<Map> initial_map(original_constructor->initial_map());
+ if (initial_map->constructor_or_backpointer() ==
+ *mtarget.Value()) {
+ *maps_return = ZoneHandleSet<Map>(initial_map);
+ return result;
+ }
+ }
+ }
+ // We reached the allocation of the {receiver}.
+ return kNoReceiverMaps;
+ }
+ break;
+ }
+ case IrOpcode::kStoreField: {
+ // We only care about StoreField of maps.
+ Node* const object = GetValueInput(effect, 0);
+ FieldAccess const& access = FieldAccessOf(effect->op());
+ if (access.base_is_tagged == kTaggedBase &&
+ access.offset == HeapObject::kMapOffset) {
+ if (IsSame(receiver, object)) {
+ Node* const value = GetValueInput(effect, 1);
+ HeapObjectMatcher m(value);
+ if (m.HasValue()) {
+ *maps_return = ZoneHandleSet<Map>(Handle<Map>::cast(m.Value()));
+ return result;
+ }
+ }
+ // Without alias analysis we cannot tell whether this
+ // StoreField[map] affects {receiver} or not.
+ result = kUnreliableReceiverMaps;
+ }
+ break;
+ }
+ case IrOpcode::kJSStoreMessage:
+ case IrOpcode::kJSStoreModule:
+ case IrOpcode::kStoreElement:
+ case IrOpcode::kStoreTypedElement: {
+ // These never change the map of objects.
+ break;
+ }
+ default: {
+ DCHECK_EQ(1, effect->op()->EffectOutputCount());
+ if (effect->op()->EffectInputCount() != 1) {
+ // Didn't find any appropriate CheckMaps node.
+ return kNoReceiverMaps;
+ }
+ if (!effect->op()->HasProperty(Operator::kNoWrite)) {
+ // Without alias/escape analysis we cannot tell whether this
+ // {effect} affects {receiver} or not.
+ result = kUnreliableReceiverMaps;
+ }
+ break;
+ }
+ }
+ DCHECK_EQ(1, effect->op()->EffectInputCount());
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+}
// static
MaybeHandle<Context> NodeProperties::GetSpecializationContext(
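
InferReceiverMaps walks the effect chain backwards from a use site until it finds a map witness (a CheckMaps, the JSCreate allocation itself, or a StoreField writing the map), downgrading the result to unreliable whenever it must step over a possibly-writing effect. A usage sketch from a hypothetical reducer (NoChange() as in V8's Reducer interface; {receiver} and {effect} assumed in scope):

    ZoneHandleSet<Map> receiver_maps;
    NodeProperties::InferReceiverMapsResult result =
        NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
    if (result == NodeProperties::kNoReceiverMaps) return NoChange();
    if (result == NodeProperties::kUnreliableReceiverMaps) {
      // Witness found past side-effecting nodes: only usable together with
      // a map stability dependency or an explicit map check.
    }
    for (size_t i = 0; i < receiver_maps.size(); ++i) {
      Handle<Map> map = receiver_maps[i];
      // ... specialize the operation for {map} ...
    }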
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index d428160651..5ed85402d1 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -8,6 +8,7 @@
#include "src/compiler/node.h"
#include "src/compiler/types.h"
#include "src/globals.h"
+#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
@@ -123,6 +124,20 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// - Switch: [ IfValue, ..., IfDefault ]
static void CollectControlProjections(Node* node, Node** proj, size_t count);
+ // Checks if two nodes are the same, looking past {CheckHeapObject}.
+ static bool IsSame(Node* a, Node* b);
+
+ // Walks up the {effect} chain to find a witness that provides map
+  // information about the {receiver}. Can look through potentially
+  // side-effecting nodes.
+ enum InferReceiverMapsResult {
+ kNoReceiverMaps, // No receiver maps inferred.
+ kReliableReceiverMaps, // Receiver maps can be trusted.
+ kUnreliableReceiverMaps // Receiver maps might have changed (side-effect).
+ };
+ static InferReceiverMapsResult InferReceiverMaps(
+ Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
+
// ---------------------------------------------------------------------------
// Context.
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 1410ab436c..16dc2dbab2 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -296,12 +296,44 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
return mask == 3;
}
+bool Node::OwnedByAddressingOperand() const {
+ for (Use* use = first_use_; use; use = use->next) {
+ Node* from = use->from();
+ if (from->opcode() != IrOpcode::kLoad &&
+        // If {from} is a store, make sure it does not use {this} as the value
+ (from->opcode() != IrOpcode::kStore || from->InputAt(2) == this) &&
+ from->opcode() != IrOpcode::kInt32Add &&
+ from->opcode() != IrOpcode::kInt64Add) {
+ return false;
+ }
+ }
+ return true;
+}
void Node::Print() const {
OFStream os(stdout);
os << *this << std::endl;
+ for (Node* input : this->inputs()) {
+ os << " " << *input << std::endl;
+ }
}
+std::ostream& operator<<(std::ostream& os, const Node& n) {
+ os << n.id() << ": " << *n.op();
+ if (n.InputCount() > 0) {
+ os << "(";
+ for (int i = 0; i < n.InputCount(); ++i) {
+ if (i != 0) os << ", ";
+ if (n.InputAt(i)) {
+ os << n.InputAt(i)->id();
+ } else {
+ os << "null";
+ }
+ }
+ os << ")";
+ }
+ return os;
+}
Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
: op_(op),
@@ -378,25 +410,6 @@ void Node::Verify() {
}
#endif
-
-std::ostream& operator<<(std::ostream& os, const Node& n) {
- os << n.id() << ": " << *n.op();
- if (n.InputCount() > 0) {
- os << "(";
- for (int i = 0; i < n.InputCount(); ++i) {
- if (i != 0) os << ", ";
- if (n.InputAt(i)) {
- os << n.InputAt(i)->id();
- } else {
- os << "null";
- }
- }
- os << ")";
- }
- return os;
-}
-
-
Node::InputEdges::iterator Node::InputEdges::iterator::operator++(int n) {
iterator result(*this);
++(*this);
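
OwnedByAddressingOperand counts a use as addressing only when the node feeds an address: a load, the base or index of a store, or an integer add (lea-like address arithmetic). A store that consumes the node as its value (input 2 in the machine-level Store layout) disqualifies it. Sketch, assuming in-scope {base}, {offset}, {zero}, {value}, {effect}, {control} and a StoreRepresentation {store_rep}:

    Node* addr = graph->NewNode(machine.Int32Add(), base, offset);
    // {addr} as a store's base input: still an addressing use.
    Node* s1 = graph->NewNode(machine.Store(store_rep), addr, zero, value,
                              effect, control);
    // {addr} as the stored value: not an addressing use, so from here on
    // addr->OwnedByAddressingOperand() returns false.
    Node* s2 = graph->NewNode(machine.Store(store_rep), base, zero, addr,
                              s1, control);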
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 7c9f3ad26f..b291af236f 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -158,6 +158,10 @@ class V8_EXPORT_PRIVATE Node final {
// Returns true if {owner1} and {owner2} are the only users of {this} node.
bool OwnedBy(Node const* owner1, Node const* owner2) const;
+
+  // Returns true if addressing-related operations (such as load, store, lea)
+  // are the only users of {this} node.
+ bool OwnedByAddressingOperand() const;
void Print() const;
private:
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 1d90095769..b50754c235 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -59,6 +59,7 @@
V(FrameState) \
V(StateValues) \
V(TypedStateValues) \
+ V(ArgumentsObjectState) \
V(ObjectState) \
V(TypedObjectState) \
V(Call) \
@@ -120,6 +121,7 @@
V(JSToString)
#define JS_OTHER_UNOP_LIST(V) \
+ V(JSClassOf) \
V(JSTypeOf)
#define JS_SIMPLE_UNOP_LIST(V) \
@@ -141,6 +143,7 @@
V(JSLoadGlobal) \
V(JSStoreProperty) \
V(JSStoreNamed) \
+ V(JSStoreNamedOwn) \
V(JSStoreGlobal) \
V(JSStoreDataPropertyInLiteral) \
V(JSDeleteProperty) \
@@ -157,9 +160,11 @@
V(JSCreateScriptContext)
#define JS_OTHER_OP_LIST(V) \
- V(JSCallConstruct) \
- V(JSCallConstructWithSpread) \
- V(JSCallFunction) \
+ V(JSConstruct) \
+ V(JSConstructWithSpread) \
+ V(JSCallForwardVarargs) \
+ V(JSCall) \
+ V(JSCallWithSpread) \
V(JSCallRuntime) \
V(JSConvertReceiver) \
V(JSForInNext) \
@@ -171,7 +176,8 @@
V(JSGeneratorStore) \
V(JSGeneratorRestoreContinuation) \
V(JSGeneratorRestoreRegister) \
- V(JSStackCheck)
+ V(JSStackCheck) \
+ V(JSDebugger)
#define JS_OP_LIST(V) \
JS_SIMPLE_BINOP_LIST(V) \
@@ -186,6 +192,7 @@
V(ChangeTaggedToInt32) \
V(ChangeTaggedToUint32) \
V(ChangeTaggedToFloat64) \
+ V(ChangeTaggedToTaggedSigned) \
V(ChangeInt31ToTaggedSigned) \
V(ChangeInt32ToTagged) \
V(ChangeUint32ToTagged) \
@@ -303,11 +310,13 @@
V(StringCharCodeAt) \
V(StringFromCharCode) \
V(StringFromCodePoint) \
+ V(StringIndexOf) \
V(CheckBounds) \
V(CheckIf) \
V(CheckMaps) \
V(CheckNumber) \
V(CheckInternalizedString) \
+ V(CheckReceiver) \
V(CheckString) \
V(CheckSmi) \
V(CheckHeapObject) \
@@ -323,7 +332,8 @@
V(StoreBuffer) \
V(StoreElement) \
V(StoreTypedElement) \
- V(ObjectIsCallable) \
+ V(ObjectIsDetectableCallable) \
+ V(ObjectIsNonCallable) \
V(ObjectIsNumber) \
V(ObjectIsReceiver) \
V(ObjectIsSmi) \
@@ -591,16 +601,10 @@
V(Uint32x4GreaterThan) \
V(Uint32x4GreaterThanOrEqual) \
V(Uint32x4FromFloat32x4) \
- V(CreateBool32x4) \
- V(Bool32x4ReplaceLane) \
V(Bool32x4And) \
V(Bool32x4Or) \
V(Bool32x4Xor) \
V(Bool32x4Not) \
- V(Bool32x4Swizzle) \
- V(Bool32x4Shuffle) \
- V(Bool32x4Equal) \
- V(Bool32x4NotEqual) \
V(CreateInt16x8) \
V(Int16x8ReplaceLane) \
V(Int16x8Neg) \
@@ -619,9 +623,6 @@
V(Int16x8LessThanOrEqual) \
V(Int16x8GreaterThan) \
V(Int16x8GreaterThanOrEqual) \
- V(Int16x8Select) \
- V(Int16x8Swizzle) \
- V(Int16x8Shuffle) \
V(Uint16x8AddSaturate) \
V(Uint16x8SubSaturate) \
V(Uint16x8Min) \
@@ -632,16 +633,10 @@
V(Uint16x8LessThanOrEqual) \
V(Uint16x8GreaterThan) \
V(Uint16x8GreaterThanOrEqual) \
- V(CreateBool16x8) \
- V(Bool16x8ReplaceLane) \
V(Bool16x8And) \
V(Bool16x8Or) \
V(Bool16x8Xor) \
V(Bool16x8Not) \
- V(Bool16x8Swizzle) \
- V(Bool16x8Shuffle) \
- V(Bool16x8Equal) \
- V(Bool16x8NotEqual) \
V(CreateInt8x16) \
V(Int8x16ReplaceLane) \
V(Int8x16Neg) \
@@ -660,9 +655,6 @@
V(Int8x16LessThanOrEqual) \
V(Int8x16GreaterThan) \
V(Int8x16GreaterThanOrEqual) \
- V(Int8x16Select) \
- V(Int8x16Swizzle) \
- V(Int8x16Shuffle) \
V(Uint8x16AddSaturate) \
V(Uint8x16SubSaturate) \
V(Uint8x16Min) \
@@ -673,16 +665,23 @@
V(Uint8x16LessThanOrEqual) \
V(Uint8x16GreaterThan) \
V(Uint8x16GreaterThanOrEqual) \
- V(CreateBool8x16) \
- V(Bool8x16ReplaceLane) \
V(Bool8x16And) \
V(Bool8x16Or) \
V(Bool8x16Xor) \
V(Bool8x16Not) \
- V(Bool8x16Swizzle) \
- V(Bool8x16Shuffle) \
- V(Bool8x16Equal) \
- V(Bool8x16NotEqual)
+ V(Simd128And) \
+ V(Simd128Or) \
+ V(Simd128Xor) \
+ V(Simd128Not) \
+ V(Simd32x4Select) \
+ V(Simd32x4Swizzle) \
+ V(Simd32x4Shuffle) \
+ V(Simd16x8Select) \
+ V(Simd16x8Swizzle) \
+ V(Simd16x8Shuffle) \
+ V(Simd8x16Select) \
+ V(Simd8x16Swizzle) \
+ V(Simd8x16Shuffle)
#define MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
V(Float32x4ExtractLane) \
@@ -691,13 +690,10 @@
V(Int8x16ExtractLane)
#define MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
- V(Bool32x4ExtractLane) \
V(Bool32x4AnyTrue) \
V(Bool32x4AllTrue) \
- V(Bool16x8ExtractLane) \
V(Bool16x8AnyTrue) \
V(Bool16x8AllTrue) \
- V(Bool8x16ExtractLane) \
V(Bool8x16AnyTrue) \
V(Bool8x16AllTrue)
@@ -709,14 +705,7 @@
V(Simd128Store) \
V(Simd128Store1) \
V(Simd128Store2) \
- V(Simd128Store3) \
- V(Simd128And) \
- V(Simd128Or) \
- V(Simd128Xor) \
- V(Simd128Not) \
- V(Simd32x4Select) \
- V(Simd32x4Swizzle) \
- V(Simd32x4Shuffle)
+ V(Simd128Store3)
#define MACHINE_SIMD_OP_LIST(V) \
MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
@@ -769,7 +758,7 @@ class V8_EXPORT_PRIVATE IrOpcode {
// Returns true if opcode for JavaScript operator.
static bool IsJsOpcode(Value value) {
- return kJSEqual <= value && value <= kJSStackCheck;
+ return kJSEqual <= value && value <= kJSDebugger;
}
// Returns true if opcode for constant operator.
@@ -791,7 +780,7 @@ class V8_EXPORT_PRIVATE IrOpcode {
// Returns true if opcode can be inlined.
static bool IsInlineeOpcode(Value value) {
- return value == kJSCallConstruct || value == kJSCallFunction;
+ return value == kJSConstruct || value == kJSCall;
}
// Returns true if opcode for comparison operator.
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index c422f0986b..dfd4c4b604 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -864,12 +864,29 @@ Type* OperationTyper::NumberShiftRightLogical(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- if (!lhs->IsInhabited()) return Type::None();
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
lhs = NumberToUint32(lhs);
+ rhs = NumberToUint32(rhs);
+
+ uint32_t min_lhs = lhs->Min();
+ uint32_t max_lhs = lhs->Max();
+ uint32_t min_rhs = rhs->Min();
+ uint32_t max_rhs = rhs->Max();
+ if (max_rhs > 31) {
+    // The shift count is masked to five bits, so widen rhs to [0, 31].
+ max_rhs = 31;
+ min_rhs = 0;
+ }
+
+ double min = min_lhs >> max_rhs;
+ double max = max_lhs >> min_rhs;
+ DCHECK_LE(0, min);
+ DCHECK_LE(max, kMaxUInt32);
- // Logical right-shifting any value cannot make it larger.
- return Type::Range(0.0, lhs->Max(), zone());
+ if (min == 0 && max == kMaxInt) return Type::Unsigned31();
+ if (min == 0 && max == kMaxUInt32) return Type::Unsigned32();
+ return Type::Range(min, max, zone());
}
Type* OperationTyper::NumberAtan2(Type* lhs, Type* rhs) {
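
The tightened NumberShiftRightLogical now shifts both range endpoints: the smallest result is the smallest lhs shifted by the largest count, the largest result the largest lhs shifted by the smallest count. A worked instance of the new rule (plain arithmetic, no V8 types):

    // lhs in [8, 16], rhs in [1, 2] after NumberToUint32:
    uint32_t lo = 8 >> 2;   // == 2  (min_lhs >> max_rhs)
    uint32_t hi = 16 >> 1;  // == 8  (max_lhs >> min_rhs)
    // New result: Range(2, 8); the old code returned Range(0, 16).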
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 02b2f64a30..0d488d8514 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -78,6 +78,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSLoadGlobal:
case IrOpcode::kJSStoreGlobal:
+ case IrOpcode::kJSStoreNamedOwn:
case IrOpcode::kJSStoreDataPropertyInLiteral:
case IrOpcode::kJSDeleteProperty:
@@ -93,15 +94,17 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSToString:
// Call operations
- case IrOpcode::kJSCallConstruct:
- case IrOpcode::kJSCallConstructWithSpread:
- case IrOpcode::kJSCallFunction:
+ case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructWithSpread:
+ case IrOpcode::kJSCallForwardVarargs:
+ case IrOpcode::kJSCall:
+ case IrOpcode::kJSCallWithSpread:
// Misc operations
- case IrOpcode::kJSConvertReceiver:
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
+ case IrOpcode::kJSDebugger:
case IrOpcode::kJSGetSuperConstructor:
return true;
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index d0f4f18ea3..330b0960ec 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -37,7 +37,6 @@
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-frame-specialization.h"
#include "src/compiler/js-generic-lowering.h"
-#include "src/compiler/js-global-object-specialization.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
@@ -546,14 +545,13 @@ PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
class PipelineCompilationJob final : public CompilationJob {
public:
- PipelineCompilationJob(Isolate* isolate, Handle<JSFunction> function)
+ PipelineCompilationJob(ParseInfo* parse_info, Handle<JSFunction> function)
// Note that the CompilationInfo is not initialized at the time we pass it
// to the CompilationJob constructor, but it is not dereferenced there.
- : CompilationJob(isolate, &info_, "TurboFan"),
- zone_(isolate->allocator(), ZONE_NAME),
- zone_stats_(isolate->allocator()),
- parse_info_(&zone_, handle(function->shared())),
- info_(&parse_info_, function),
+ : CompilationJob(parse_info->isolate(), &info_, "TurboFan"),
+ parse_info_(parse_info),
+ zone_stats_(parse_info->isolate()->allocator()),
+ info_(parse_info_.get()->zone(), parse_info_.get(), function),
pipeline_statistics_(CreatePipelineStatistics(info(), &zone_stats_)),
data_(&zone_stats_, info(), pipeline_statistics_.get()),
pipeline_(&data_),
@@ -565,9 +563,8 @@ class PipelineCompilationJob final : public CompilationJob {
Status FinalizeJobImpl() final;
private:
- Zone zone_;
+ std::unique_ptr<ParseInfo> parse_info_;
ZoneStats zone_stats_;
- ParseInfo parse_info_;
CompilationInfo info_;
std::unique_ptr<PipelineStatistics> pipeline_statistics_;
PipelineData data_;
@@ -597,6 +594,10 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
if (FLAG_inline_accessors) {
info()->MarkAsAccessorInliningEnabled();
}
+ if (info()->closure()->feedback_vector_cell()->map() ==
+ isolate()->heap()->one_closure_cell_map()) {
+ info()->MarkAsFunctionContextSpecializing();
+ }
}
if (!info()->is_optimizing_from_bytecode()) {
if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
@@ -604,7 +605,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
info()->MarkAsInliningEnabled();
}
- linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
+ linkage_ = new (info()->zone())
+ Linkage(Linkage::ComputeIncoming(info()->zone(), info()));
if (!pipeline_.CreateGraph()) {
if (isolate()->has_pending_exception()) return FAILED; // Stack overflowed.
@@ -778,6 +780,7 @@ struct InliningPhase {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
+ CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
@@ -785,7 +788,8 @@ struct InliningPhase {
call_reducer_flags |= JSCallReducer::kDeoptimizationEnabled;
}
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
- call_reducer_flags, data->native_context());
+ call_reducer_flags, data->native_context(),
+ data->info()->dependencies());
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(),
data->info()->is_function_context_specializing()
@@ -793,9 +797,6 @@ struct InliningPhase {
: MaybeHandle<Context>());
JSFrameSpecialization frame_specialization(
&graph_reducer, data->info()->osr_frame(), data->jsgraph());
- JSGlobalObjectSpecialization global_object_specialization(
- &graph_reducer, data->jsgraph(), data->global_object(),
- data->info()->dependencies());
JSNativeContextSpecialization::Flags flags =
JSNativeContextSpecialization::kNoFlags;
if (data->info()->is_accessor_inlining_enabled()) {
@@ -821,13 +822,11 @@ struct InliningPhase {
? JSIntrinsicLowering::kDeoptimizationEnabled
: JSIntrinsicLowering::kDeoptimizationDisabled);
AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &checkpoint_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
if (data->info()->is_frame_specializing()) {
AddReducer(data, &graph_reducer, &frame_specialization);
}
- if (data->info()->is_deoptimization_enabled()) {
- AddReducer(data, &graph_reducer, &global_object_specialization);
- }
AddReducer(data, &graph_reducer, &native_context_specialization);
AddReducer(data, &graph_reducer, &context_specialization);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
@@ -907,10 +906,11 @@ struct TypedLoweringPhase {
? JSBuiltinReducer::kDeoptimizationEnabled
: JSBuiltinReducer::kNoFlags,
data->info()->dependencies(), data->native_context());
- Handle<LiteralsArray> literals_array(data->info()->closure()->literals());
+ Handle<FeedbackVector> feedback_vector(
+ data->info()->closure()->feedback_vector());
JSCreateLowering create_lowering(
&graph_reducer, data->info()->dependencies(), data->jsgraph(),
- literals_array, data->native_context(), temp_zone);
+ feedback_vector, data->native_context(), temp_zone);
JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
if (data->info()->is_deoptimization_enabled()) {
typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
@@ -949,7 +949,7 @@ struct EscapeAnalysisPhase {
void Run(PipelineData* data, Zone* temp_zone) {
EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
temp_zone);
- escape_analysis.Run();
+ if (!escape_analysis.Run()) return;
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
&escape_analysis, temp_zone);
@@ -1425,7 +1425,7 @@ struct GenerateCodePhase {
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
CodeGenerator generator(data->frame(), linkage, data->sequence(),
- data->info(), data->protected_instructions());
+ data->info());
data->set_code(generator.GenerateCode());
}
};
@@ -1666,7 +1666,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
ZoneStats zone_stats(isolate->allocator());
SourcePositionTable source_positions(graph);
PipelineData data(&zone_stats, &info, graph, schedule, &source_positions);
- data.set_verify_graph(FLAG_csa_verify);
+ data.set_verify_graph(FLAG_verify_csa);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(&info, &zone_stats));
@@ -1750,8 +1750,16 @@ Handle<Code> Pipeline::GenerateCodeForTesting(
}
// static
-CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function) {
- return new PipelineCompilationJob(function->GetIsolate(), function);
+CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function,
+ bool has_script) {
+ Handle<SharedFunctionInfo> shared = handle(function->shared());
+ ParseInfo* parse_info;
+ if (!has_script) {
+ parse_info = ParseInfo::AllocateWithoutScript(shared);
+ } else {
+ parse_info = new ParseInfo(shared);
+ }
+ return new PipelineCompilationJob(parse_info, function);
}
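Note the ownership hand-off here: the factory allocates the ParseInfo with a raw new on both paths, and the job's std::unique_ptr member (shown earlier) takes it over. A sketch of the pattern, with hypothetical names:

    #include <memory>

    struct ParseInfo {};

    class Job {
     public:
      explicit Job(ParseInfo* parse_info) : parse_info_(parse_info) {}
     private:
      std::unique_ptr<ParseInfo> parse_info_;  // takes over the raw pointer
    };

    Job* NewJob() { return new Job(new ParseInfo()); }

    int main() {
      std::unique_ptr<Job> job(NewJob());  // destroying the job frees the info
      return 0;
    }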
// static
@@ -1802,7 +1810,7 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
(FLAG_turbo_verify_machine_graph != nullptr &&
(!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
!strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())))) {
- if (FLAG_trace_csa_verify) {
+ if (FLAG_trace_verify_csa) {
AllowHandleDereference allow_deref;
CompilationInfo* info = data->info();
CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 0c3e4ea7cb..624ef01ead 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -34,7 +34,8 @@ class SourcePositionTable;
class Pipeline : public AllStatic {
public:
// Returns a new compilation job for the given function.
- static CompilationJob* NewCompilationJob(Handle<JSFunction> function);
+ static CompilationJob* NewCompilationJob(Handle<JSFunction> function,
+ bool has_script);
// Returns a new compilation job for the WebAssembly compilation info.
static CompilationJob* NewWasmCompilationJob(
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 56755d2446..455b0ae97e 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -813,7 +813,8 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
// Check if current frame is an arguments adaptor frame.
__ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ cmpi(scratch1,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ bne(&done);
// Load arguments count from current arguments adaptor frame (note, it
@@ -1082,10 +1083,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- Deoptimizer::BailoutType bailout_type =
- Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result = AssembleDeoptimizerCall(
- deopt_state_id, bailout_type, current_source_position_);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -2029,8 +2028,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
void Generate() final {
PPCOperandConverter i(gen_, instr_);
- Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
- i.InputInt32(instr_->InputCount() - 1));
+ Builtins::Name trap_id =
+ static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
bool old_has_frame = __ has_frame();
if (frame_elided_) {
__ set_has_frame(true);
@@ -2040,14 +2039,11 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
if (frame_elided_) {
__ set_has_frame(old_has_frame);
}
- if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
- }
}
private:
- void GenerateCallToTrap(Runtime::FunctionId trap_id) {
- if (trap_id == Runtime::kNumFunctions) {
+ void GenerateCallToTrap(Builtins::Name trap_id) {
+ if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
@@ -2056,15 +2052,20 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
} else {
- __ Move(cp, isolate()->native_context());
gen_->AssembleSourcePosition(instr_);
- __ CallRuntime(trap_id);
+ __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ RelocInfo::CODE_TARGET);
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
}
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
}
bool frame_elided_;
@@ -2180,16 +2181,19 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type,
- SourcePosition pos) {
+ int deoptimization_id, SourcePosition pos) {
+ DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ Deoptimizer::BailoutType bailout_type =
+ deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+ : Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index c7e1fa34c1..c2770b3ce8 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -154,7 +154,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -216,6 +216,9 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -325,6 +328,9 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -389,6 +395,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -437,6 +446,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1536,8 +1548,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1782,14 +1794,16 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index a318dd02ae..0e101770c3 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -166,6 +166,13 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2,
void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
+void RawMachineAssembler::Unreachable() {
+ Node* values[] = {UndefinedConstant()}; // Unused.
+ Node* ret = MakeNode(common()->Throw(), 1, values);
+ schedule()->AddThrow(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
void RawMachineAssembler::Comment(const char* msg) {
AddNode(machine()->Comment(msg));
}
@@ -332,7 +339,11 @@ Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
return graph()->NewNodeUnchecked(op, input_count, inputs);
}
-RawMachineLabel::~RawMachineLabel() { DCHECK(bound_ || !used_); }
+RawMachineLabel::~RawMachineLabel() {
+  // If this DCHECK fails, the label has been bound but never used, or used
+  // but never bound. Either way, the register allocator would crash.
+ DCHECK_EQ(bound_, used_);
+}
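The strengthened destructor check enforces that a label is bound if and only if it is used. A minimal stand-alone sketch of such a destructor-checked invariant (assumed names, not the V8 API):

    #include <cassert>

    class Label {
     public:
      void Bind() { bound_ = true; }
      void Use() { used_ = true; }
      ~Label() { assert(bound_ == used_); }  // bound XOR used is a bug

     private:
      bool bound_ = false;
      bool used_ = false;
    };

    int main() {
      Label unused_label;  // neither bound nor used: fine
      Label both;
      both.Use();          // a branch targets it...
      both.Bind();         // ...and it is bound: fine
      return 0;
    }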
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index af36b8c08a..d726217ed4 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -534,13 +534,21 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// Conversions.
Node* BitcastTaggedToWord(Node* a) {
+#ifdef ENABLE_VERIFY_CSA
return AddNode(machine()->BitcastTaggedToWord(), a);
+#else
+ return a;
+#endif
}
Node* BitcastWordToTagged(Node* a) {
return AddNode(machine()->BitcastWordToTagged(), a);
}
Node* BitcastWordToTaggedSigned(Node* a) {
+#ifdef ENABLE_VERIFY_CSA
return AddNode(machine()->BitcastWordToTaggedSigned(), a);
+#else
+ return a;
+#endif
}
Node* TruncateFloat64ToWord32(Node* a) {
return AddNode(machine()->TruncateFloat64ToWord32(), a);
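With ENABLE_VERIFY_CSA undefined, the tagged/word bitcasts above collapse to identities, since both sides share a machine representation; the node is only emitted when the verifier needs to see the representation change. A sketch of the pattern (the flag name is taken from the diff, the rest is illustrative):

    #include <cstdio>

    // #define ENABLE_VERIFY_CSA  // toggled by the build configuration

    long BitcastTaggedToWord(long a) {
    #ifdef ENABLE_VERIFY_CSA
      std::printf("emit bitcast node\n");  // stand-in for AddNode(...)
      return a;
    #else
      return a;  // no-op: nothing to verify, nothing to emit
    #endif
    }

    int main() { return BitcastTaggedToWord(42) == 42 ? 0 : 1; }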
@@ -761,6 +769,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
void DebugBreak();
+ void Unreachable();
void Comment(const char* msg);
// Add success / exception successor blocks and ends the current block ending
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 707752f364..38feb8b751 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -24,6 +24,7 @@ Reduction RedundancyElimination::Reduce(Node* node) {
case IrOpcode::kCheckIf:
case IrOpcode::kCheckInternalizedString:
case IrOpcode::kCheckNumber:
+ case IrOpcode::kCheckReceiver:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
case IrOpcode::kCheckTaggedHole:
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index 5a2ed93827..d589a9d371 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/register-allocator-verifier.h"
+
#include "src/bit-vector.h"
#include "src/compiler/instruction.h"
-#include "src/compiler/register-allocator-verifier.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index 9a605d62da..989589e6fb 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -5,13 +5,14 @@
#ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
#define V8_REGISTER_ALLOCATOR_VERIFIER_H_
+#include "src/compiler/instruction.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
-class InstructionOperand;
+class InstructionBlock;
class InstructionSequence;
// The register allocator validator traverses instructions in the instruction
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 5515843612..403c344aee 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -86,6 +86,10 @@ int GetByteWidth(MachineRepresentation rep) {
return kDoubleSize;
case MachineRepresentation::kSimd128:
return kSimd128Size;
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
+ return kSimdMaskRegisters ? kPointerSize : kSimd128Size;
case MachineRepresentation::kNone:
break;
}
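The new SIMD mask cases size a value at pointer width only when the target has dedicated mask registers, and at a full 128 bits otherwise. A compile-time sketch (the constant values are assumptions for illustration):

    constexpr bool kSimdMaskRegisters = false;  // target-dependent; assumed
    constexpr int kPointerSize = 8;
    constexpr int kSimd128Size = 16;

    constexpr int MaskByteWidth() {
      return kSimdMaskRegisters ? kPointerSize : kSimd128Size;
    }

    static_assert(MaskByteWidth() == 16, "no mask registers: spill 128 bits");

    int main() { return 0; }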
@@ -3271,19 +3275,18 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
- LifetimePosition pos = use_pos[reg];
-
- if (pos < register_use->pos()) {
+ if (use_pos[reg] < register_use->pos()) {
+      // If there is a gap position before the next register use, we can
+      // spill until there; the gap position then accommodates the fill move.
if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
register_use->pos())) {
SpillBetween(current, current->Start(), register_use->pos());
- } else {
- SetLiveRangeAssignedRegister(current, reg);
- SplitAndSpillIntersecting(current);
+ return;
}
- return;
}
+ // We couldn't spill until the next register use. Split before the register
+ // is blocked, if applicable.
if (block_pos[reg] < current->End()) {
// Register becomes blocked before the current range end. Split before that
// position.
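The restructured logic prefers spilling up to the next register use when a gap position exists to hold the fill move, and otherwise falls through to the split-before-blocked path. A control-flow sketch with hypothetical types:

    #include <cstdio>

    struct Pos { int v; };
    bool GapExistsBetween(Pos a, Pos b) { return b.v - a.v > 1; }

    void AllocateBlocked(Pos start, Pos next_use, Pos use_pos_reg,
                         Pos block_pos_reg, Pos end) {
      if (use_pos_reg.v < next_use.v) {
        if (GapExistsBetween(start, next_use)) {
          std::printf("spill between start and next use\n");
          return;  // the gap position holds the fill move
        }
      }
      if (block_pos_reg.v < end.v) {
        std::printf("split before the register becomes blocked\n");
      }
    }

    int main() {
      AllocateBlocked({0}, {10}, {5}, {8}, {20});  // spills
      AllocateBlocked({0}, {1}, {0}, {8}, {20});   // no gap: splits
      return 0;
    }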
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 7a5a43e61a..4b4f8c91c6 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -10,6 +10,7 @@
#include "src/code-factory.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -169,9 +170,10 @@ Node* RepresentationChanger::GetRepresentationFor(
case MachineRepresentation::kWord64:
DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetWord64RepresentationFor(node, output_rep, output_type);
- case MachineRepresentation::kSimd128: // Fall through.
- // TODO(bbudge) Handle conversions between tagged and untagged.
- break;
+ case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
return node;
}
@@ -271,9 +273,15 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
}
- } else if (CanBeTaggedPointer(output_rep) &&
- use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedTaggedToTaggedSigned();
+ } else if (CanBeTaggedPointer(output_rep)) {
+ if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = simplified()->CheckedTaggedToTaggedSigned();
+ } else if (output_type->Is(Type::SignedSmall())) {
+ op = simplified()->ChangeTaggedToTaggedSigned();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedSigned);
+ }
} else if (output_rep == MachineRepresentation::kBit &&
use_info.type_check() == TypeCheckKind::kSignedSmall) {
// TODO(turbofan): Consider adding a Bailout operator that just deopts.
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index f99ab37838..8e9db3dcb0 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -131,10 +131,18 @@ class S390OperandConverter final : public InstructionOperandConverter {
}
};
+static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
+ return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
+}
+
static inline bool HasRegisterInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsRegister();
}
+static inline bool HasFPRegisterInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsFPRegister();
+}
+
static inline bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
@@ -143,6 +151,10 @@ static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsStackSlot();
}
+static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsFPStackSlot();
+}
+
namespace {
class OutOfLineLoadNAN32 final : public OutOfLineCode {
@@ -260,17 +272,33 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
return eq;
case kNotEqual:
return ne;
- case kSignedLessThan:
case kUnsignedLessThan:
+      // An unsigned number is never less than 0.
+ if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+ return CC_NOP;
+ // fall through
+ case kSignedLessThan:
return lt;
- case kSignedGreaterThanOrEqual:
case kUnsignedGreaterThanOrEqual:
+      // An unsigned number is always greater than or equal to 0.
+ if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+ return CC_ALWAYS;
+ // fall through
+ case kSignedGreaterThanOrEqual:
return ge;
- case kSignedLessThanOrEqual:
case kUnsignedLessThanOrEqual:
+      // An unsigned number is never less than 0.
+ if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+ return CC_EQ;
+ // fall through
+ case kSignedLessThanOrEqual:
return le;
- case kSignedGreaterThan:
case kUnsignedGreaterThan:
+      // An unsigned number is always greater than or equal to 0.
+ if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+ return ne;
+ // fall through
+ case kSignedGreaterThan:
return gt;
case kOverflow:
// Overflow checked for AddP/SubP only.
@@ -302,8 +330,176 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
return kNoCondition;
}
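The unsigned comparison cases above exploit that a load-and-test compares against zero: "unsigned < 0" can never hold (CC_NOP) and "unsigned >= 0" always holds (CC_ALWAYS), so the branch condition is decided statically. A sketch of the folding (assumed enum names):

    #include <cstdio>

    enum Cond { CC_NOP, CC_ALWAYS, LT, GE };

    Cond Fold(bool unsigned_vs_zero, bool less_than) {
      if (unsigned_vs_zero) return less_than ? CC_NOP : CC_ALWAYS;
      return less_than ? LT : GE;  // signed compares stay as-is
    }

    int main() {
      std::printf("%d %d\n", Fold(true, true), Fold(true, false));  // 0 1
      return 0;
    }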
+typedef void (MacroAssembler::*RRTypeInstr)(Register, Register);
+typedef void (MacroAssembler::*RMTypeInstr)(Register, const MemOperand&);
+typedef void (MacroAssembler::*RITypeInstr)(Register, const Operand&);
+typedef void (MacroAssembler::*RRRTypeInstr)(Register, Register, Register);
+typedef void (MacroAssembler::*RRMTypeInstr)(Register, Register,
+ const MemOperand&);
+typedef void (MacroAssembler::*RRITypeInstr)(Register, Register,
+ const Operand&);
+
+#define CHECK_AND_ZERO_EXT_OUTPUT(num) \
+ { \
+ CHECK(HasImmediateInput(instr, (num))); \
+ int doZeroExt = i.InputInt32(num); \
+ if (doZeroExt) masm->LoadlW(i.OutputRegister(), i.OutputRegister()); \
+ }
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+ Instruction* instr, RRTypeInstr rr_instr,
+ RMTypeInstr rm_instr, RITypeInstr ri_instr) {
+ CHECK(i.OutputRegister().is(i.InputRegister(0)));
+ AddressingMode mode = AddressingModeField::decode(instr->opcode());
+ int zeroExtIndex = 2;
+ if (mode != kMode_None) {
+ size_t first_index = 1;
+ MemOperand operand = i.MemoryOperand(&mode, &first_index);
+ zeroExtIndex = first_index;
+ CHECK(rm_instr != NULL);
+ (masm->*rm_instr)(i.OutputRegister(), operand);
+ } else if (HasRegisterInput(instr, 1)) {
+ (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
+ } else if (HasImmediateInput(instr, 1)) {
+ (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+ } else if (HasStackSlotInput(instr, 1)) {
+ (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
+ } else {
+ UNREACHABLE();
+ }
+ CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
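The AssembleBinOp overloads dispatch through pointer-to-member-function types (the typedefs above), selecting one instruction form per operand shape and invoking it with (masm->*instr)(...). A minimal stand-alone sketch of the mechanism (hypothetical assembler):

    #include <cstdio>

    struct Masm {
      void AddRR(int d, int s) { std::printf("add r%d, r%d\n", d, s); }
      void AddRI(int d, int imm) { std::printf("add r%d, #%d\n", d, imm); }
    };

    typedef void (Masm::*RRInstr)(int, int);
    typedef void (Masm::*RIInstr)(int, int);

    void AssembleBinOp(Masm* masm, bool has_register_input, RRInstr rr,
                       RIInstr ri) {
      if (has_register_input)
        (masm->*rr)(1, 2);   // register/register form
      else
        (masm->*ri)(1, 42);  // register/immediate form
    }

    int main() {
      Masm m;
      AssembleBinOp(&m, true, &Masm::AddRR, &Masm::AddRI);
      AssembleBinOp(&m, false, &Masm::AddRR, &Masm::AddRI);
      return 0;
    }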
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+ Instruction* instr, RRRTypeInstr rrr_instr,
+ RMTypeInstr rm_instr, RITypeInstr ri_instr) {
+ AddressingMode mode = AddressingModeField::decode(instr->opcode());
+ int zeroExtIndex = 2;
+ if (mode != kMode_None) {
+ CHECK(i.OutputRegister().is(i.InputRegister(0)));
+ size_t first_index = 1;
+ MemOperand operand = i.MemoryOperand(&mode, &first_index);
+ zeroExtIndex = first_index;
+ CHECK(rm_instr != NULL);
+ (masm->*rm_instr)(i.OutputRegister(), operand);
+ } else if (HasRegisterInput(instr, 1)) {
+ (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1));
+ } else if (HasImmediateInput(instr, 1)) {
+ CHECK(i.OutputRegister().is(i.InputRegister(0)));
+ (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+ } else if (HasStackSlotInput(instr, 1)) {
+ CHECK(i.OutputRegister().is(i.InputRegister(0)));
+ (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
+ } else {
+ UNREACHABLE();
+ }
+ CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+ Instruction* instr, RRRTypeInstr rrr_instr,
+ RMTypeInstr rm_instr, RRITypeInstr rri_instr) {
+ AddressingMode mode = AddressingModeField::decode(instr->opcode());
+ int zeroExtIndex = 2;
+ if (mode != kMode_None) {
+ CHECK(i.OutputRegister().is(i.InputRegister(0)));
+ size_t first_index = 1;
+ MemOperand operand = i.MemoryOperand(&mode, &first_index);
+ zeroExtIndex = first_index;
+ CHECK(rm_instr != NULL);
+ (masm->*rm_instr)(i.OutputRegister(), operand);
+ } else if (HasRegisterInput(instr, 1)) {
+ (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1));
+ } else if (HasImmediateInput(instr, 1)) {
+ (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
+ i.InputImmediate(1));
+ } else if (HasStackSlotInput(instr, 1)) {
+ CHECK(i.OutputRegister().is(i.InputRegister(0)));
+ (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
+ } else {
+ UNREACHABLE();
+ }
+ CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+ Instruction* instr, RRRTypeInstr rrr_instr,
+ RRMTypeInstr rrm_instr, RRITypeInstr rri_instr) {
+ AddressingMode mode = AddressingModeField::decode(instr->opcode());
+ int zeroExtIndex = 2;
+ if (mode != kMode_None) {
+ size_t first_index = 1;
+ MemOperand operand = i.MemoryOperand(&mode, &first_index);
+ zeroExtIndex = first_index;
+ CHECK(rrm_instr != NULL);
+ (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0), operand);
+ } else if (HasRegisterInput(instr, 1)) {
+ (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1));
+ } else if (HasImmediateInput(instr, 1)) {
+ (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
+ i.InputImmediate(1));
+ } else if (HasStackSlotInput(instr, 1)) {
+ (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0),
+ i.InputStackSlot32(1));
+ } else {
+ UNREACHABLE();
+ }
+ CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+ Instruction* instr, RRRTypeInstr rrr_instr,
+ RRITypeInstr rri_instr) {
+ AddressingMode mode = AddressingModeField::decode(instr->opcode());
+ CHECK(mode == kMode_None);
+ int zeroExtIndex = 2;
+ if (HasRegisterInput(instr, 1)) {
+ (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1));
+ } else if (HasImmediateInput(instr, 1)) {
+ (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
+ i.InputImmediate(1));
+ } else {
+ UNREACHABLE();
+ }
+ CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+ Instruction* instr, RRTypeInstr rr_instr,
+ RITypeInstr ri_instr) {
+ AddressingMode mode = AddressingModeField::decode(instr->opcode());
+ CHECK(mode == kMode_None);
+ CHECK(i.OutputRegister().is(i.InputRegister(0)));
+ int zeroExtIndex = 2;
+ if (HasRegisterInput(instr, 1)) {
+ (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
+ } else if (HasImmediateInput(instr, 1)) {
+ (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+ } else {
+ UNREACHABLE();
+ }
+ CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+#define ASSEMBLE_BIN_OP(instr1, instr2, instr3) \
+ AssembleBinOp(i, masm(), instr, &MacroAssembler::instr1, \
+ &MacroAssembler::instr2, &MacroAssembler::instr3)
+
+#undef CHECK_AND_ZERO_EXT_OUTPUT
+
} // namespace
+#define CHECK_AND_ZERO_EXT_OUTPUT(num) \
+ { \
+ CHECK(HasImmediateInput(instr, (num))); \
+ int doZeroExt = i.InputInt32(num); \
+ if (doZeroExt) __ LoadlW(i.OutputRegister(), i.OutputRegister()); \
+ }
+
#define ASSEMBLE_FLOAT_UNOP(asm_instr) \
do { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
@@ -328,26 +524,92 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
} \
} while (0)
-#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
- do { \
- if (HasRegisterInput(instr, 1)) { \
- if (i.CompareLogical()) { \
- __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
- } else { \
- __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
- } \
- } else { \
- if (i.CompareLogical()) { \
- __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
- } else { \
- __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
- } \
- } \
+#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
+ do { \
+ AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+ if (mode != kMode_None) { \
+ size_t first_index = 1; \
+ MemOperand operand = i.MemoryOperand(&mode, &first_index); \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), operand); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), operand); \
+ } \
+ } else if (HasRegisterInput(instr, 1)) { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } \
+ } else if (HasImmediateInput(instr, 1)) { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } \
+ } else { \
+ DCHECK(HasStackSlotInput(instr, 1)); \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputStackSlot(1)); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputStackSlot(1)); \
+ } \
+ } \
} while (0)
-#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
- do { \
- __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+#define ASSEMBLE_COMPARE32(cmp_instr, cmpl_instr) \
+ do { \
+ AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+ if (mode != kMode_None) { \
+ size_t first_index = 1; \
+ MemOperand operand = i.MemoryOperand(&mode, &first_index); \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), operand); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), operand); \
+ } \
+ } else if (HasRegisterInput(instr, 1)) { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } \
+ } else if (HasImmediateInput(instr, 1)) { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } \
+ } else { \
+ DCHECK(HasStackSlotInput(instr, 1)); \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputStackSlot32(1)); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputStackSlot32(1)); \
+ } \
+ } \
+ } while (0)
+
+#define ASSEMBLE_FLOAT_COMPARE(cmp_rr_instr, cmp_rm_instr, load_instr) \
+ do { \
+ AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+ if (mode != kMode_None) { \
+ size_t first_index = 1; \
+ MemOperand operand = i.MemoryOperand(&mode, &first_index); \
+ __ cmp_rm_instr(i.InputDoubleRegister(0), operand); \
+ } else if (HasFPRegisterInput(instr, 1)) { \
+ __ cmp_rr_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+ } else { \
+ USE(HasFPStackSlotInput); \
+ DCHECK(HasFPStackSlotInput(instr, 1)); \
+ MemOperand operand = i.InputStackSlot(1); \
+ if (operand.offset() >= 0) { \
+ __ cmp_rm_instr(i.InputDoubleRegister(0), operand); \
+ } else { \
+ __ load_instr(kScratchDoubleReg, operand); \
+ __ cmp_rr_instr(i.InputDoubleRegister(0), kScratchDoubleReg); \
+ } \
+ } \
} while (0)
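The offset check in ASSEMBLE_FLOAT_COMPARE works around an addressing limit: the memory forms of the floating-point compares encode only a short non-negative displacement, so a stack slot at a negative frame offset is first loaded into a scratch register with a wider-range load (ley/ldy) and compared register-to-register. A sketch of that fallback (printf stands in for the emitted instructions):

    #include <cstdio>

    void FloatCompare(int offset) {
      if (offset >= 0) {
        std::printf("compare reg, [fp+%d]\n", offset);  // memory form fits
      } else {
        std::printf("load scratch, [fp%d]\n", offset);  // wider-range load
        std::printf("compare reg, scratch\n");          // register form
      }
    }

    int main() {
      FloatCompare(16);
      FloatCompare(-8);
      return 0;
    }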
// Divide instruction dr will implicitly use register pair
@@ -359,7 +621,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
__ LoadRR(r0, i.InputRegister(0)); \
__ shift_instr(r0, Operand(32)); \
__ div_instr(r0, i.InputRegister(1)); \
- __ ltr(i.OutputRegister(), r0); \
+ __ LoadlW(i.OutputRegister(), r0); \
} while (0)
#define ASSEMBLE_FLOAT_MODULO() \
@@ -579,6 +841,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
} \
__ bind(&done); \
} while (0)
// Only MRI mode is available for these instructions
#define ASSEMBLE_LOAD_FLOAT(asm_instr) \
do { \
@@ -596,6 +859,38 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
__ asm_instr(result, operand); \
} while (0)
+#define ASSEMBLE_LOADANDTEST64(asm_instr_rr, asm_instr_rm) \
+ { \
+ AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+ Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0; \
+ if (mode != kMode_None) { \
+ size_t first_index = 0; \
+ MemOperand operand = i.MemoryOperand(&mode, &first_index); \
+ __ asm_instr_rm(dst, operand); \
+ } else if (HasRegisterInput(instr, 0)) { \
+ __ asm_instr_rr(dst, i.InputRegister(0)); \
+ } else { \
+ DCHECK(HasStackSlotInput(instr, 0)); \
+ __ asm_instr_rm(dst, i.InputStackSlot(0)); \
+ } \
+ }
+
+#define ASSEMBLE_LOADANDTEST32(asm_instr_rr, asm_instr_rm) \
+ { \
+ AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+ Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0; \
+ if (mode != kMode_None) { \
+ size_t first_index = 0; \
+ MemOperand operand = i.MemoryOperand(&mode, &first_index); \
+ __ asm_instr_rm(dst, operand); \
+ } else if (HasRegisterInput(instr, 0)) { \
+ __ asm_instr_rr(dst, i.InputRegister(0)); \
+ } else { \
+ DCHECK(HasStackSlotInput(instr, 0)); \
+ __ asm_instr_rm(dst, i.InputStackSlot32(0)); \
+ } \
+ }
+
#define ASSEMBLE_STORE_FLOAT32() \
do { \
size_t index = 0; \
@@ -739,7 +1034,8 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
// Check if current frame is an arguments adaptor frame.
__ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ CmpP(scratch1,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ bne(&done);
// Load arguments count from current arguments adaptor frame (note, it
@@ -994,10 +1290,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- Deoptimizer::BailoutType bailout_type =
- Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result = AssembleDeoptimizerCall(
- deopt_state_id, bailout_type, current_source_position_);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1058,35 +1352,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_And32:
- ASSEMBLE_BINOP(And);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(nrk, And, nilf);
+ } else {
+ ASSEMBLE_BIN_OP(nr, And, nilf);
+ }
break;
case kS390_And64:
ASSEMBLE_BINOP(AndP);
break;
case kS390_Or32:
- ASSEMBLE_BINOP(Or);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(ork, Or, oilf);
+ } else {
+ ASSEMBLE_BIN_OP(or_z, Or, oilf);
+ }
+ break;
case kS390_Or64:
ASSEMBLE_BINOP(OrP);
break;
case kS390_Xor32:
- ASSEMBLE_BINOP(Xor);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(xrk, Xor, xilf);
+ } else {
+ ASSEMBLE_BIN_OP(xr, Xor, xilf);
+ }
break;
case kS390_Xor64:
ASSEMBLE_BINOP(XorP);
break;
case kS390_ShiftLeft32:
- if (HasRegisterInput(instr, 1)) {
- if (i.OutputRegister().is(i.InputRegister(1)) &&
- !CpuFeatures::IsSupported(DISTINCT_OPS)) {
- __ LoadRR(kScratchReg, i.InputRegister(1));
- __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
- } else {
- ASSEMBLE_BINOP(ShiftLeft);
- }
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ AssembleBinOp(i, masm(), instr, &MacroAssembler::ShiftLeft,
+ &MacroAssembler::ShiftLeft);
} else {
- ASSEMBLE_BINOP(ShiftLeft);
+ AssembleBinOp(i, masm(), instr, &MacroAssembler::sll,
+ &MacroAssembler::sll);
}
- __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftLeft64:
@@ -1094,18 +1396,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#endif
case kS390_ShiftRight32:
- if (HasRegisterInput(instr, 1)) {
- if (i.OutputRegister().is(i.InputRegister(1)) &&
- !CpuFeatures::IsSupported(DISTINCT_OPS)) {
- __ LoadRR(kScratchReg, i.InputRegister(1));
- __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
- } else {
- ASSEMBLE_BINOP(ShiftRight);
- }
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ AssembleBinOp(i, masm(), instr, &MacroAssembler::srlk,
+ &MacroAssembler::srlk);
} else {
- ASSEMBLE_BINOP(ShiftRight);
+ AssembleBinOp(i, masm(), instr, &MacroAssembler::srl,
+ &MacroAssembler::srl);
}
- __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftRight64:
@@ -1113,19 +1410,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#endif
case kS390_ShiftRightArith32:
- if (HasRegisterInput(instr, 1)) {
- if (i.OutputRegister().is(i.InputRegister(1)) &&
- !CpuFeatures::IsSupported(DISTINCT_OPS)) {
- __ LoadRR(kScratchReg, i.InputRegister(1));
- __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
- kScratchReg);
- } else {
- ASSEMBLE_BINOP(ShiftRightArith);
- }
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ AssembleBinOp(i, masm(), instr, &MacroAssembler::srak,
+ &MacroAssembler::srak);
} else {
- ASSEMBLE_BINOP(ShiftRightArith);
+ AssembleBinOp(i, masm(), instr, &MacroAssembler::sra,
+ &MacroAssembler::sra);
}
- __ LoadlW(i.OutputRegister(), i.OutputRegister());
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftRightArith64:
@@ -1207,7 +1498,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
#endif
- case kS390_RotRight32:
+ case kS390_RotRight32: {
if (HasRegisterInput(instr, 1)) {
__ LoadComplementRR(kScratchReg, i.InputRegister(1));
__ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
@@ -1215,7 +1506,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ rll(i.OutputRegister(), i.InputRegister(0),
Operand(32 - i.InputInt32(1)));
}
+ CHECK_AND_ZERO_EXT_OUTPUT(2);
break;
+ }
#if V8_TARGET_ARCH_S390X
case kS390_RotRight64:
if (HasRegisterInput(instr, 1)) {
@@ -1226,14 +1519,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(64 - i.InputInt32(1)));
}
break;
-#endif
- case kS390_Not32:
- __ Not32(i.OutputRegister(), i.InputRegister(0));
- break;
- case kS390_Not64:
- __ Not64(i.OutputRegister(), i.InputRegister(0));
- break;
-#if V8_TARGET_ARCH_S390X
case kS390_RotLeftAndClear64:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int shiftAmount = i.InputInt32(1);
@@ -1282,10 +1567,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
#endif
- case kS390_Add32:
- ASSEMBLE_BINOP(Add32);
- __ LoadW(i.OutputRegister(), i.OutputRegister());
+ case kS390_Add32: {
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(ark, Add32, Add32_RRI);
+ } else {
+ ASSEMBLE_BIN_OP(ar, Add32, Add32_RI);
+ }
break;
+ }
case kS390_Add64:
ASSEMBLE_BINOP(AddP);
break;
@@ -1310,8 +1599,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kS390_Sub32:
- ASSEMBLE_BINOP(Sub32);
- __ LoadW(i.OutputRegister(), i.OutputRegister());
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(srk, Sub32, Sub32_RRI);
+ } else {
+ ASSEMBLE_BIN_OP(sr, Sub32, Sub32_RI);
+ }
break;
case kS390_Sub64:
ASSEMBLE_BINOP(SubP);
@@ -1343,17 +1635,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kS390_Mul32:
- if (HasRegisterInput(instr, 1)) {
- __ Mul32(i.InputRegister(0), i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- __ Mul32(i.InputRegister(0), i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- __ Mul32(i.InputRegister(0), i.InputStackSlot32(1));
- } else {
- UNIMPLEMENTED();
- }
+ ASSEMBLE_BIN_OP(Mul32, Mul32, Mul32);
+ break;
+ case kS390_Mul32WithOverflow:
+ ASSEMBLE_BIN_OP(Mul32WithOverflowIfCCUnequal,
+ Mul32WithOverflowIfCCUnequal,
+ Mul32WithOverflowIfCCUnequal);
break;
case kS390_Mul64:
+ CHECK(i.OutputRegister().is(i.InputRegister(0)));
if (HasRegisterInput(instr, 1)) {
__ Mul64(i.InputRegister(0), i.InputRegister(1));
} else if (HasImmediateInput(instr, 1)) {
@@ -1365,32 +1655,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kS390_MulHigh32:
- __ LoadRR(r1, i.InputRegister(0));
- if (HasRegisterInput(instr, 1)) {
- __ mr_z(r0, i.InputRegister(1));
- } else if (HasStackSlotInput(instr, 1)) {
- __ mfy(r0, i.InputStackSlot32(1));
- } else {
- UNIMPLEMENTED();
- }
- __ LoadW(i.OutputRegister(), r0);
- break;
- case kS390_Mul32WithHigh32:
- __ LoadRR(r1, i.InputRegister(0));
- __ mr_z(r0, i.InputRegister(1));
- __ LoadW(i.OutputRegister(0), r1); // low
- __ LoadW(i.OutputRegister(1), r0); // high
+ ASSEMBLE_BIN_OP(MulHigh32, MulHigh32, MulHigh32);
break;
case kS390_MulHighU32:
- __ LoadRR(r1, i.InputRegister(0));
- if (HasRegisterInput(instr, 1)) {
- __ mlr(r0, i.InputRegister(1));
- } else if (HasStackSlotInput(instr, 1)) {
- __ ml(r0, i.InputStackSlot32(1));
- } else {
- UNIMPLEMENTED();
- }
- __ LoadlW(i.OutputRegister(), r0);
+ ASSEMBLE_BIN_OP(MulHighU32, MulHighU32, MulHighU32);
break;
case kS390_MulFloat:
// Ensure we don't clobber right
@@ -1419,13 +1687,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ltgr(i.OutputRegister(), r1); // Copy R1: Quotient to output
break;
#endif
- case kS390_Div32:
- __ LoadRR(r0, i.InputRegister(0));
- __ srda(r0, Operand(32));
- __ dr(r0, i.InputRegister(1));
- __ LoadAndTestP_ExtendSrc(i.OutputRegister(),
- r1); // Copy R1: Quotient to output
+ case kS390_Div32: {
+ ASSEMBLE_BIN_OP(Div32, Div32, Div32);
break;
+ }
#if V8_TARGET_ARCH_S390X
case kS390_DivU64:
__ LoadRR(r1, i.InputRegister(0));
@@ -1434,14 +1699,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ltgr(i.OutputRegister(), r1); // Copy R1: Quotient to output
break;
#endif
- case kS390_DivU32:
- __ LoadRR(r0, i.InputRegister(0));
- __ srdl(r0, Operand(32));
- __ dlr(r0, i.InputRegister(1)); // R0:R1: Dividend
- __ LoadlW(i.OutputRegister(), r1); // Copy R1: Quotient to output
- __ LoadAndTestP_ExtendSrc(r1, r1);
+ case kS390_DivU32: {
+ ASSEMBLE_BIN_OP(DivU32, DivU32, DivU32);
break;
-
+ }
case kS390_DivFloat:
// InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
@@ -1467,10 +1728,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kS390_Mod32:
- ASSEMBLE_MODULO(dr, srda);
+ ASSEMBLE_BIN_OP(Mod32, Mod32, Mod32);
break;
case kS390_ModU32:
- ASSEMBLE_MODULO(dlr, srdl);
+ ASSEMBLE_BIN_OP(ModU32, ModU32, ModU32);
break;
#if V8_TARGET_ARCH_S390X
case kS390_Mod64:
@@ -1575,7 +1836,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kS390_Neg32:
__ lcr(i.OutputRegister(), i.InputRegister(0));
- __ LoadW(i.OutputRegister(), i.OutputRegister());
+ CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
case kS390_Neg64:
__ lcgr(i.OutputRegister(), i.InputRegister(0));
@@ -1623,14 +1884,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Cntlz32: {
__ llgfr(i.OutputRegister(), i.InputRegister(0));
__ flogr(r0, i.OutputRegister());
- __ LoadRR(i.OutputRegister(), r0);
- __ SubP(i.OutputRegister(), Operand(32));
- } break;
+ __ Add32(i.OutputRegister(), r0, Operand(-32));
+    // No need to zero-extend: llgfr has already done it.
+ break;
+ }
#if V8_TARGET_ARCH_S390X
case kS390_Cntlz64: {
__ flogr(r0, i.InputRegister(0));
__ LoadRR(i.OutputRegister(), r0);
- } break;
+ break;
+ }
#endif
case kS390_Popcnt32:
__ Popcnt32(i.OutputRegister(), i.InputRegister(0));
@@ -1641,7 +1904,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#endif
case kS390_Cmp32:
- ASSEMBLE_COMPARE(Cmp32, CmpLogical32);
+ ASSEMBLE_COMPARE32(Cmp32, CmpLogical32);
break;
#if V8_TARGET_ARCH_S390X
case kS390_Cmp64:
@@ -1649,15 +1912,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#endif
case kS390_CmpFloat:
- __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ ASSEMBLE_FLOAT_COMPARE(cebr, ceb, ley);
+ // __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
break;
case kS390_CmpDouble:
- __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ ASSEMBLE_FLOAT_COMPARE(cdbr, cdb, ldy);
+ // __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
break;
case kS390_Tst32:
if (HasRegisterInput(instr, 1)) {
- __ lr(r0, i.InputRegister(0));
- __ nr(r0, i.InputRegister(1));
+ __ And(r0, i.InputRegister(0), i.InputRegister(1));
} else {
Operand opnd = i.InputImmediate(1);
if (is_uint16(opnd.immediate())) {
@@ -1731,18 +1995,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_ExtendSignWord8:
-#if V8_TARGET_ARCH_S390X
- __ lgbr(i.OutputRegister(), i.InputRegister(0));
-#else
__ lbr(i.OutputRegister(), i.InputRegister(0));
-#endif
+ CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
case kS390_ExtendSignWord16:
-#if V8_TARGET_ARCH_S390X
- __ lghr(i.OutputRegister(), i.InputRegister(0));
-#else
__ lhr(i.OutputRegister(), i.InputRegister(0));
-#endif
+ CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
#if V8_TARGET_ARCH_S390X
case kS390_ExtendSignWord32:
@@ -1978,6 +2236,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_LOAD_INTEGER(lg);
break;
#endif
+ case kS390_LoadAndTestWord32: {
+ ASSEMBLE_LOADANDTEST32(ltr, lt_z);
+ break;
+ }
+ case kS390_LoadAndTestWord64: {
+ ASSEMBLE_LOADANDTEST64(ltgr, ltg);
+ break;
+ }
case kS390_LoadFloat32:
ASSEMBLE_LOAD_FLOAT(LoadFloat32);
break;
@@ -2013,6 +2279,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_StoreDouble:
ASSEMBLE_STORE_DOUBLE();
break;
+ case kS390_Lay:
+ __ lay(i.OutputRegister(), i.MemoryOperand());
+ break;
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
#if V8_TARGET_ARCH_S390X
@@ -2138,8 +2407,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
void Generate() final {
S390OperandConverter i(gen_, instr_);
- Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
- i.InputInt32(instr_->InputCount() - 1));
+ Builtins::Name trap_id =
+ static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
bool old_has_frame = __ has_frame();
if (frame_elided_) {
__ set_has_frame(true);
@@ -2149,14 +2418,11 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
if (frame_elided_) {
__ set_has_frame(old_has_frame);
}
- if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
- }
}
private:
- void GenerateCallToTrap(Runtime::FunctionId trap_id) {
- if (trap_id == Runtime::kNumFunctions) {
+ void GenerateCallToTrap(Builtins::Name trap_id) {
+ if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
@@ -2165,15 +2431,20 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
} else {
- __ Move(cp, isolate()->native_context());
gen_->AssembleSourcePosition(instr_);
- __ CallRuntime(trap_id);
+ __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ RelocInfo::CODE_TARGET);
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
}
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
}
bool frame_elided_;
@@ -2259,16 +2530,19 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type,
- SourcePosition pos) {
+ int deoptimization_id, SourcePosition pos) {
+ DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ Deoptimizer::BailoutType bailout_type =
+ deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+ : Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
@@ -2432,7 +2706,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
#endif
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
} else {
- __ mov(dst, Operand(src.ToInt32()));
+ __ Load(dst, Operand(src.ToInt32()));
}
break;
case Constant::kInt64:
@@ -2441,7 +2715,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
- __ mov(dst, Operand(src.ToInt64()));
+ __ Load(dst, Operand(src.ToInt64()));
}
#else
__ mov(dst, Operand(src.ToInt64()));
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
index ad5d7cbc74..b99e79f68b 100644
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -34,6 +34,7 @@ namespace compiler {
V(S390_RotLeftAndClear64) \
V(S390_RotLeftAndClearLeft64) \
V(S390_RotLeftAndClearRight64) \
+ V(S390_Lay) \
V(S390_Add32) \
V(S390_Add64) \
V(S390_AddPair) \
@@ -46,7 +47,7 @@ namespace compiler {
V(S390_SubPair) \
V(S390_MulPair) \
V(S390_Mul32) \
- V(S390_Mul32WithHigh32) \
+ V(S390_Mul32WithOverflow) \
V(S390_Mul64) \
V(S390_MulHigh32) \
V(S390_MulHighU32) \
@@ -134,6 +135,10 @@ namespace compiler {
V(S390_LoadWordU16) \
V(S390_LoadWordS32) \
V(S390_LoadWordU32) \
+ V(S390_LoadAndTestWord32) \
+ V(S390_LoadAndTestWord64) \
+ V(S390_LoadAndTestFloat32) \
+ V(S390_LoadAndTestFloat64) \
V(S390_LoadReverse16RR) \
V(S390_LoadReverse32RR) \
V(S390_LoadReverse64RR) \
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index 8fc1cfb8be..d6ec3deaab 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -35,6 +35,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_RotLeftAndClear64:
case kS390_RotLeftAndClearLeft64:
case kS390_RotLeftAndClearRight64:
+ case kS390_Lay:
case kS390_Add32:
case kS390_Add64:
case kS390_AddPair:
@@ -47,7 +48,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_SubFloat:
case kS390_SubDouble:
case kS390_Mul32:
- case kS390_Mul32WithHigh32:
+ case kS390_Mul32WithOverflow:
case kS390_Mul64:
case kS390_MulHigh32:
case kS390_MulHighU32:
@@ -129,6 +130,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadReverse16RR:
case kS390_LoadReverse32RR:
case kS390_LoadReverse64RR:
+ case kS390_LoadAndTestWord32:
+ case kS390_LoadAndTestWord64:
+ case kS390_LoadAndTestFloat32:
+ case kS390_LoadAndTestFloat64:
return kNoOpcodeFlags;
case kS390_LoadWordS8:
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index d906c17fbe..e591d3caeb 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -12,29 +12,85 @@ namespace v8 {
namespace internal {
namespace compiler {
-enum ImmediateMode {
- kShift32Imm,
- kShift64Imm,
- kInt32Imm,
- kInt32Imm_Negate,
- kUint32Imm,
- kInt20Imm,
- kNoImmediate
+enum class OperandMode : uint32_t {
+ kNone = 0u,
+ // Immediate mode
+ kShift32Imm = 1u << 0,
+ kShift64Imm = 1u << 1,
+ kInt32Imm = 1u << 2,
+ kInt32Imm_Negate = 1u << 3,
+ kUint32Imm = 1u << 4,
+ kInt20Imm = 1u << 5,
+ kUint12Imm = 1u << 6,
+ // Instr format
+ kAllowRRR = 1u << 7,
+ kAllowRM = 1u << 8,
+ kAllowRI = 1u << 9,
+ kAllowRRI = 1u << 10,
+ kAllowRRM = 1u << 11,
+  // Useful combinations
+ kAllowImmediate = kAllowRI | kAllowRRI,
+ kAllowMemoryOperand = kAllowRM | kAllowRRM,
+ kAllowDistinctOps = kAllowRRR | kAllowRRI | kAllowRRM,
+ kBitWiseCommonMode = kAllowRI,
+ kArithmeticCommonMode = kAllowRM | kAllowRI
};
+typedef base::Flags<OperandMode, uint32_t> OperandModes;
+DEFINE_OPERATORS_FOR_FLAGS(OperandModes);
+OperandModes immediateModeMask =
+ OperandMode::kShift32Imm | OperandMode::kShift64Imm |
+ OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
+ OperandMode::kUint32Imm | OperandMode::kInt20Imm;
+
+#define AndOperandMode \
+ ((OperandMode::kBitWiseCommonMode | OperandMode::kUint32Imm | \
+ OperandMode::kAllowRM | (CpuFeatures::IsSupported(DISTINCT_OPS) \
+ ? OperandMode::kAllowRRR \
+ : OperandMode::kBitWiseCommonMode)))
+
+#define OrOperandMode AndOperandMode
+#define XorOperandMode AndOperandMode
+
+#define ShiftOperandMode \
+ ((OperandMode::kBitWiseCommonMode | OperandMode::kShift64Imm | \
+ (CpuFeatures::IsSupported(DISTINCT_OPS) \
+ ? OperandMode::kAllowRRR \
+ : OperandMode::kBitWiseCommonMode)))
+
+#define AddOperandMode \
+ ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
+ (CpuFeatures::IsSupported(DISTINCT_OPS) \
+ ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
+ : OperandMode::kArithmeticCommonMode)))
+#define SubOperandMode \
+ ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm_Negate | \
+ (CpuFeatures::IsSupported(DISTINCT_OPS) \
+ ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
+ : OperandMode::kArithmeticCommonMode)))
+#define MulOperandMode \
+ (OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm)
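OperandModes is a type-safe bitmask: DEFINE_OPERATORS_FOR_FLAGS supplies the | and & operators so individual OperandMode bits compose into combined modes. A simplified stand-in for the pattern (minimal hand-written operators in place of base::Flags):

    #include <cstdint>
    #include <cstdio>

    enum class OperandMode : uint32_t {
      kNone = 0u,
      kAllowRRR = 1u << 0,
      kAllowRM = 1u << 1,
      kAllowRI = 1u << 2,
    };

    constexpr OperandMode operator|(OperandMode a, OperandMode b) {
      return static_cast<OperandMode>(static_cast<uint32_t>(a) |
                                      static_cast<uint32_t>(b));
    }
    constexpr bool operator&(OperandMode a, OperandMode b) {
      return (static_cast<uint32_t>(a) & static_cast<uint32_t>(b)) != 0;
    }

    int main() {
      OperandMode mode = OperandMode::kAllowRRR | OperandMode::kAllowRI;
      std::printf("%d %d\n", mode & OperandMode::kAllowRI,   // 1
                             mode & OperandMode::kAllowRM);  // 0
      return 0;
    }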
+
// Adds S390-specific methods for generating operands.
class S390OperandGenerator final : public OperandGenerator {
public:
explicit S390OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
+ InstructionOperand UseOperand(Node* node, OperandModes mode) {
if (CanBeImmediate(node, mode)) {
return UseImmediate(node);
}
return UseRegister(node);
}
+ InstructionOperand UseAnyExceptImmediate(Node* node) {
+ if (NodeProperties::IsConstant(node))
+ return UseRegister(node);
+ else
+ return Use(node);
+ }
+
int64_t GetImmediate(Node* node) {
if (node->opcode() == IrOpcode::kInt32Constant)
return OpParameter<int32_t>(node);
@@ -45,7 +101,7 @@ class S390OperandGenerator final : public OperandGenerator {
return 0L;
}
- bool CanBeImmediate(Node* node, ImmediateMode mode) {
+ bool CanBeImmediate(Node* node, OperandModes mode) {
int64_t value;
if (node->opcode() == IrOpcode::kInt32Constant)
value = OpParameter<int32_t>(node);
@@ -56,22 +112,47 @@ class S390OperandGenerator final : public OperandGenerator {
return CanBeImmediate(value, mode);
}
- bool CanBeImmediate(int64_t value, ImmediateMode mode) {
- switch (mode) {
- case kShift32Imm:
- return 0 <= value && value < 32;
- case kShift64Imm:
- return 0 <= value && value < 64;
- case kInt32Imm:
- return is_int32(value);
- case kInt32Imm_Negate:
- return is_int32(-value);
- case kUint32Imm:
- return is_uint32(value);
- case kInt20Imm:
- return is_int20(value);
- case kNoImmediate:
- return false;
+ bool CanBeImmediate(int64_t value, OperandModes mode) {
+ if (mode & OperandMode::kShift32Imm)
+ return 0 <= value && value < 32;
+ else if (mode & OperandMode::kShift64Imm)
+ return 0 <= value && value < 64;
+ else if (mode & OperandMode::kInt32Imm)
+ return is_int32(value);
+ else if (mode & OperandMode::kInt32Imm_Negate)
+ return is_int32(-value);
+ else if (mode & OperandMode::kUint32Imm)
+ return is_uint32(value);
+ else if (mode & OperandMode::kInt20Imm)
+ return is_int20(value);
+ else if (mode & OperandMode::kUint12Imm)
+ return is_uint12(value);
+ else
+ return false;
+ }
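+
+  // Editor's note on CanBeImmediate() above: the if/else chain means the
+  // first immediate-mode bit in declaration order wins, e.g. for
+  //
+  //   CanBeImmediate(value, OperandMode::kInt32Imm | OperandMode::kUint12Imm)
+  //
+  // only is_int32(value) is consulted, so callers are expected to set a
+  // single immediate-mode bit per query.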
+
+ bool CanBeMemoryOperand(InstructionCode opcode, Node* user, Node* input,
+ int effect_level) {
+ if (input->opcode() != IrOpcode::kLoad ||
+ !selector()->CanCover(user, input)) {
+ return false;
+ }
+
+ if (effect_level != selector()->GetEffectLevel(input)) {
+ return false;
+ }
+
+ MachineRepresentation rep =
+ LoadRepresentationOf(input->op()).representation();
+ switch (opcode) {
+ case kS390_Cmp64:
+ case kS390_LoadAndTestWord64:
+ return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
+ case kS390_LoadAndTestWord32:
+ case kS390_Cmp32:
+ return rep == MachineRepresentation::kWord32;
+ default:
+ break;
}
return false;
}
@@ -119,9 +200,9 @@ class S390OperandGenerator final : public OperandGenerator {
return mode;
}
- AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
- InstructionOperand inputs[],
- size_t* input_count) {
+ AddressingMode GetEffectiveAddressMemoryOperand(
+ Node* operand, InstructionOperand inputs[], size_t* input_count,
+ OperandModes immediate_mode = OperandMode::kInt20Imm) {
#if V8_TARGET_ARCH_S390X
BaseWithIndexAndDisplacement64Matcher m(operand,
AddressOption::kAllowInputSwap);
@@ -131,7 +212,7 @@ class S390OperandGenerator final : public OperandGenerator {
#endif
DCHECK(m.matches());
if ((m.displacement() == nullptr ||
- CanBeImmediate(m.displacement(), kInt20Imm))) {
+ CanBeImmediate(m.displacement(), immediate_mode))) {
DCHECK(m.scale() == 0);
return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
m.displacement_mode(), inputs,
@@ -158,6 +239,153 @@ class S390OperandGenerator final : public OperandGenerator {
namespace {
+bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {
+ switch (opcode) {
+ case kS390_CmpFloat:
+ case kS390_CmpDouble:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
+ ArchOpcode opcode = ArchOpcodeField::decode(op);
+ return S390OpcodeOnlySupport12BitDisp(opcode);
+}
+
+#define OpcodeImmMode(op) \
+ (S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
+ : OperandMode::kInt20Imm)
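+
+// Editor's background note: base+displacement addressing on s390 has a
+// short form with an unsigned 12-bit displacement and a long form with a
+// signed 20-bit displacement; the floating-point compares above only exist
+// in the short form, hence kUint12Imm for them and kInt20Imm otherwise.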
+
+ArchOpcode SelectLoadOpcode(Node* node) {
+ NodeMatcher m(node);
+ DCHECK(m.IsLoad());
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kS390_LoadFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kS390_LoadDouble;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
+ break;
+#if !V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+#endif
+ case MachineRepresentation::kWord32:
+ opcode = kS390_LoadWordU32;
+ break;
+#if V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kS390_LoadWord64;
+ break;
+#else
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kNone:
+ default:
+ UNREACHABLE();
+ }
+ return opcode;
+}
+
+bool AutoZeroExtendsWord32ToWord64(Node* node) {
+#if !V8_TARGET_ARCH_S390X
+ return true;
+#else
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Div:
+ case IrOpcode::kUint32Div:
+ case IrOpcode::kInt32MulHigh:
+ case IrOpcode::kUint32MulHigh:
+ case IrOpcode::kInt32Mod:
+ case IrOpcode::kUint32Mod:
+ case IrOpcode::kWord32Clz:
+ case IrOpcode::kWord32Popcnt:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+#endif
+}
+
+bool ZeroExtendsWord32ToWord64(Node* node) {
+#if !V8_TARGET_ARCH_S390X
+ return true;
+#else
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Add:
+ case IrOpcode::kInt32Sub:
+ case IrOpcode::kWord32And:
+ case IrOpcode::kWord32Or:
+ case IrOpcode::kWord32Xor:
+ case IrOpcode::kWord32Shl:
+ case IrOpcode::kWord32Shr:
+ case IrOpcode::kWord32Sar:
+ case IrOpcode::kInt32Mul:
+ case IrOpcode::kWord32Ror:
+ case IrOpcode::kInt32Div:
+ case IrOpcode::kUint32Div:
+ case IrOpcode::kInt32MulHigh:
+ case IrOpcode::kInt32Mod:
+ case IrOpcode::kUint32Mod:
+ case IrOpcode::kWord32Popcnt:
+ return true;
+  // TODO(john.yan): consider whether the following cases are also valid:
+ // case IrOpcode::kWord32Equal:
+ // case IrOpcode::kInt32LessThan:
+ // case IrOpcode::kInt32LessThanOrEqual:
+ // case IrOpcode::kUint32LessThan:
+ // case IrOpcode::kUint32LessThanOrEqual:
+ // case IrOpcode::kUint32MulHigh:
+  // // These 32-bit operations implicitly zero-extend to 64-bit on x64,
+  // // so the zero-extension is a no-op.
+ // return true;
+ // case IrOpcode::kProjection: {
+ // Node* const value = node->InputAt(0);
+ // switch (value->opcode()) {
+ // case IrOpcode::kInt32AddWithOverflow:
+ // case IrOpcode::kInt32SubWithOverflow:
+ // case IrOpcode::kInt32MulWithOverflow:
+ // return true;
+ // default:
+ // return false;
+ // }
+ // }
+ case IrOpcode::kLoad: {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord32:
+ return true;
+ default:
+ return false;
+ }
+ }
+ default:
+ return false;
+ }
+#endif
+}
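+
+// Editor's summary (assumed intent): AutoZeroExtendsWord32ToWord64 lists
+// operations whose emitted s390 sequences already clear the upper 32 bits,
+// while ZeroExtendsWord32ToWord64 lists operations whose selected code is
+// known to leave a zero-extended result, so an explicit extension (e.g.
+// llgfr) can be skipped whenever it returns true.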
+
void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
S390OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -171,15 +399,15 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(1)));
}
+#if V8_TARGET_ARCH_S390X
void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
- ImmediateMode operand_mode) {
+ OperandModes operand_mode) {
S390OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseOperand(node->InputAt(1), operand_mode));
}
-#if V8_TARGET_ARCH_S390X
void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
S390OperandGenerator g(selector);
@@ -200,7 +428,7 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode, ImmediateMode operand_mode,
+ InstructionCode opcode, OperandModes operand_mode,
FlagsContinuation* cont) {
S390OperandGenerator g(selector);
Matcher m(node);
@@ -260,7 +488,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -272,54 +500,152 @@ void VisitBinop(InstructionSelector* selector, Node* node,
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
- ImmediateMode operand_mode) {
+ OperandModes operand_mode) {
FlagsContinuation cont;
VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
-} // namespace
+void VisitBin32op(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, OperandModes operand_mode,
+ FlagsContinuation* cont) {
+ S390OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ InstructionOperand inputs[8];
+ size_t input_count = 0;
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+
+  // Match a TruncateInt64ToInt32 on the left input.
+ if (m.left().IsTruncateInt64ToInt32() && selector->CanCover(node, left)) {
+ left = left->InputAt(0);
+ }
+  // Match a TruncateInt64ToInt32 on the right input.
+ if (m.right().IsTruncateInt64ToInt32() && selector->CanCover(node, right)) {
+ right = right->InputAt(0);
+ }
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- S390OperandGenerator g(this);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kFloat32:
- opcode = kS390_LoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kS390_LoadDouble;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
- break;
-#if !V8_TARGET_ARCH_S390X
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#endif
- case MachineRepresentation::kWord32:
- opcode = kS390_LoadWordU32;
- break;
#if V8_TARGET_ARCH_S390X
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64:
- opcode = kS390_LoadWord64;
- break;
+ if ((ZeroExtendsWord32ToWord64(right) || g.CanBeBetterLeftOperand(right)) &&
+ node->op()->HasProperty(Operator::kCommutative) &&
+ !g.CanBeImmediate(right, operand_mode)) {
+ std::swap(left, right);
+ }
#else
- case MachineRepresentation::kWord64: // Fall through.
+ if (node->op()->HasProperty(Operator::kCommutative) &&
+ !g.CanBeImmediate(right, operand_mode) &&
+ (g.CanBeBetterLeftOperand(right))) {
+ std::swap(left, right);
+ }
#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
+
+  // The left operand is always allocated to a register.
+ InstructionOperand const left_input = g.UseRegister(left);
+ inputs[input_count++] = left_input;
+
+ // TODO(turbofan): match complex addressing modes.
+ if (left == right) {
+ // If both inputs refer to the same operand, enforce allocating a register
+ // for both of them to ensure that we don't end up generating code like
+ // this:
+ //
+ // mov rax, [rbp-0x10]
+ // add rax, [rbp-0x10]
+ // jo label
+ inputs[input_count++] = left_input;
+ // Can only be RR or RRR
+ operand_mode &= OperandMode::kAllowRRR;
+ } else if ((operand_mode & OperandMode::kAllowImmediate) &&
+ g.CanBeImmediate(right, operand_mode)) {
+ inputs[input_count++] = g.UseImmediate(right);
+ // Can only be RI or RRI
+ operand_mode &= OperandMode::kAllowImmediate;
+ } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
+ NodeMatcher mright(right);
+ if (mright.IsLoad() && selector->CanCover(node, right) &&
+ SelectLoadOpcode(right) == kS390_LoadWordU32) {
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ operand_mode &= ~OperandMode::kAllowImmediate;
+ if (operand_mode & OperandMode::kAllowRM)
+ operand_mode &= ~OperandMode::kAllowDistinctOps;
+ } else if (operand_mode & OperandMode::kAllowRM) {
+ DCHECK(!(operand_mode & OperandMode::kAllowRRM));
+ inputs[input_count++] = g.Use(right);
+      // Cannot be an immediate.
+ operand_mode &=
+ ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
+ } else if (operand_mode & OperandMode::kAllowRRM) {
+ DCHECK(!(operand_mode & OperandMode::kAllowRM));
+ inputs[input_count++] = g.Use(right);
+      // Cannot be an immediate.
+ operand_mode &= ~OperandMode::kAllowImmediate;
+ } else {
UNREACHABLE();
- return;
+ }
+ } else {
+ inputs[input_count++] = g.UseRegister(right);
+ // Can only be RR or RRR
+ operand_mode &= OperandMode::kAllowRRR;
}
+
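+  // The extra immediate input below tells the code generator whether it
+  // still has to zero-extend the 32-bit result: true only when the
+  // operation does not zero-extend by itself and the left input is not
+  // already known to be zero-extended.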
+ bool doZeroExt =
+ AutoZeroExtendsWord32ToWord64(node) || !ZeroExtendsWord32ToWord64(left);
+
+ inputs[input_count++] =
+ g.TempImmediate(doZeroExt && (!AutoZeroExtendsWord32ToWord64(node)));
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ if (doZeroExt && (operand_mode & OperandMode::kAllowDistinctOps) &&
+      // If we can deoptimize as a result of the binop, we need to make sure
+      // that the deopt inputs are not overwritten by the binop result. One
+      // way to achieve that is to declare the output register as
+      // same-as-first.
+ !cont->IsDeoptimize()) {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ } else {
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ }
+
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ opcode = cont->Encode(opcode);
+
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->kind(), cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
+}
+
+void VisitBin32op(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+ OperandModes operand_mode) {
+ FlagsContinuation cont;
+ VisitBin32op(selector, node, opcode, operand_mode, &cont);
+}
+
+} // namespace
+
+void InstructionSelector::VisitLoad(Node* node) {
+ S390OperandGenerator g(this);
+ ArchOpcode opcode = SelectLoadOpcode(node);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionOperand inputs[3];
@@ -353,7 +679,7 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(base);
// OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
// for the store itself, so we must check compatibility with both.
- if (g.CanBeImmediate(offset, kInt20Imm)) {
+ if (g.CanBeImmediate(offset, OperandMode::kInt20Imm)) {
inputs[input_count++] = g.UseImmediate(offset);
addressing_mode = kMode_MRI;
} else {
@@ -426,6 +752,9 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -490,6 +819,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -497,7 +829,7 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
AddressingMode addressingMode = kMode_MRR;
Emit(opcode | AddressingModeField::encode(addressingMode),
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, kUint32Imm));
+ g.UseOperand(length, OperandMode::kUint32Imm));
}
void InstructionSelector::VisitCheckedStore(Node* node) {
@@ -537,6 +869,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -544,7 +879,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
AddressingMode addressingMode = kMode_MRR;
Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, kUint32Imm), g.UseRegister(value));
+ g.UseOperand(length, OperandMode::kUint32Imm), g.UseRegister(value));
}
#if 0
@@ -574,7 +909,7 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
#endif
void InstructionSelector::VisitWord32And(Node* node) {
- VisitBinop<Int32BinopMatcher>(this, node, kS390_And32, kUint32Imm);
+ VisitBin32op(this, node, kS390_And32, AndOperandMode);
}
#if V8_TARGET_ARCH_S390X
@@ -626,46 +961,36 @@ void InstructionSelector::VisitWord64And(Node* node) {
}
}
}
- VisitBinop<Int64BinopMatcher>(this, node, kS390_And64, kUint32Imm);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_And64,
+ OperandMode::kUint32Imm);
}
#endif
void InstructionSelector::VisitWord32Or(Node* node) {
- Int32BinopMatcher m(node);
- VisitBinop<Int32BinopMatcher>(this, node, kS390_Or32, kUint32Imm);
+ VisitBin32op(this, node, kS390_Or32, OrOperandMode);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Or(Node* node) {
Int64BinopMatcher m(node);
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64, kUint32Imm);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64,
+ OperandMode::kUint32Imm);
}
#endif
void InstructionSelector::VisitWord32Xor(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.right().Is(-1)) {
- Emit(kS390_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
- } else {
- VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor32, kUint32Imm);
- }
+ VisitBin32op(this, node, kS390_Xor32, XorOperandMode);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Xor(Node* node) {
- S390OperandGenerator g(this);
- Int64BinopMatcher m(node);
- if (m.right().Is(-1)) {
- Emit(kS390_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
- } else {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64, kUint32Imm);
- }
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64,
+ OperandMode::kUint32Imm);
}
#endif
void InstructionSelector::VisitWord32Shl(Node* node) {
- VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
+ VisitBin32op(this, node, kS390_ShiftLeft32, ShiftOperandMode);
}
#if V8_TARGET_ARCH_S390X
@@ -708,12 +1033,12 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
}
}
}
- VisitRRO(this, kS390_ShiftLeft64, node, kShift64Imm);
+ VisitRRO(this, kS390_ShiftLeft64, node, OperandMode::kShift64Imm);
}
#endif
void InstructionSelector::VisitWord32Shr(Node* node) {
- VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
+ VisitBin32op(this, node, kS390_ShiftRight32, ShiftOperandMode);
}
#if V8_TARGET_ARCH_S390X
@@ -752,7 +1077,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
}
}
}
- VisitRRO(this, kS390_ShiftRight64, node, kShift64Imm);
+ VisitRRO(this, kS390_ShiftRight64, node, OperandMode::kShift64Imm);
}
#endif
@@ -763,16 +1088,20 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(16) && m.right().Is(16)) {
- Emit(kS390_ExtendSignWord16, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()));
+ bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
+ Emit(kS390_ExtendSignWord16,
+ doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
return;
} else if (mleft.right().Is(24) && m.right().Is(24)) {
- Emit(kS390_ExtendSignWord8, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()));
+ bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
+ Emit(kS390_ExtendSignWord8,
+ doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
return;
}
}
- VisitRRO(this, kS390_ShiftRightArith32, node, kShift32Imm);
+ VisitBin32op(this, node, kS390_ShiftRightArith32, ShiftOperandMode);
}
#if !V8_TARGET_ARCH_S390X
@@ -798,7 +1127,7 @@ void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
// instruction.
selector->Emit(opcode2, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(2)));
+ g.UseRegister(node->InputAt(2)), g.TempImmediate(0));
}
}
@@ -828,7 +1157,8 @@ void InstructionSelector::VisitInt32PairMul(Node* node) {
// The high word of the result is not used, so we emit the standard 32 bit
// instruction.
Emit(kS390_Mul32, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(2)),
+ g.TempImmediate(0));
}
}
@@ -884,24 +1214,25 @@ void InstructionSelector::VisitWord32PairSar(Node* node) {
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Sar(Node* node) {
- VisitRRO(this, kS390_ShiftRightArith64, node, kShift64Imm);
+ VisitRRO(this, kS390_ShiftRightArith64, node, OperandMode::kShift64Imm);
}
#endif
void InstructionSelector::VisitWord32Ror(Node* node) {
- VisitRRO(this, kS390_RotRight32, node, kShift32Imm);
+ // TODO(john): match dst = ror(src1, src2 + imm)
+ VisitBin32op(this, node, kS390_RotRight32,
+ OperandMode::kAllowRI | OperandMode::kAllowRRR |
+ OperandMode::kAllowRRI | OperandMode::kShift32Imm);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Ror(Node* node) {
- VisitRRO(this, kS390_RotRight64, node, kShift64Imm);
+ VisitRRO(this, kS390_RotRight64, node, OperandMode::kShift64Imm);
}
#endif
void InstructionSelector::VisitWord32Clz(Node* node) {
- S390OperandGenerator g(this);
- Emit(kS390_Cntlz32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kS390_Cntlz32, node);
}
#if V8_TARGET_ARCH_S390X
@@ -914,8 +1245,8 @@ void InstructionSelector::VisitWord64Clz(Node* node) {
void InstructionSelector::VisitWord32Popcnt(Node* node) {
S390OperandGenerator g(this);
- Emit(kS390_Popcnt32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ Node* value = node->InputAt(0);
+ Emit(kS390_Popcnt32, g.DefineAsRegister(node), g.UseRegister(value));
}
#if V8_TARGET_ARCH_S390X
@@ -964,12 +1295,13 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
}
void InstructionSelector::VisitInt32Add(Node* node) {
- VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm);
+ VisitBin32op(this, node, kS390_Add32, AddOperandMode);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64Add(Node* node) {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
+ OperandMode::kInt32Imm);
}
#endif
@@ -977,10 +1309,12 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().Is(0)) {
- Emit(kS390_Neg32, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
+ Node* right = m.right().node();
+ bool doZeroExt = ZeroExtendsWord32ToWord64(right);
+ Emit(kS390_Neg32, g.DefineAsRegister(node), g.UseRegister(right),
+ g.TempImmediate(doZeroExt));
} else {
- VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate);
+ VisitBin32op(this, node, kS390_Sub32, SubOperandMode);
}
}
@@ -992,7 +1326,8 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
Emit(kS390_Neg64, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
} else {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
+ OperandMode::kInt32Imm_Negate);
}
}
#endif
@@ -1002,35 +1337,14 @@ namespace {
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont);
-void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- S390OperandGenerator g(selector);
- Int32BinopMatcher m(node);
- InstructionOperand result_operand = g.DefineAsRegister(node);
- InstructionOperand high32_operand = g.TempRegister();
- InstructionOperand temp_operand = g.TempRegister();
- {
- InstructionOperand outputs[] = {result_operand, high32_operand};
- InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node())};
- selector->Emit(kS390_Mul32WithHigh32, 2, outputs, 2, inputs);
- }
- {
- InstructionOperand shift_31 = g.UseImmediate(31);
- InstructionOperand outputs[] = {temp_operand};
- InstructionOperand inputs[] = {result_operand, shift_31};
- selector->Emit(kS390_ShiftRightArith32, 1, outputs, 2, inputs);
- }
-
- VisitCompare(selector, kS390_Cmp32, high32_operand, temp_operand, cont);
-}
+#if V8_TARGET_ARCH_S390X
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
S390OperandGenerator g(selector);
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
- if (g.CanBeImmediate(right, kInt32Imm)) {
+ if (g.CanBeImmediate(right, OperandMode::kInt32Imm)) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseImmediate(right));
} else {
@@ -1041,17 +1355,18 @@ void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
g.Use(right));
}
}
+#endif
} // namespace
void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
- return EmitInt32MulWithOverflow(this, node, &cont);
+ return VisitBin32op(this, node, kS390_Mul32WithOverflow,
+ OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
+ &cont);
}
- VisitMul(this, node, kS390_Mul32);
- // FlagsContinuation cont;
- // EmitInt32MulWithOverflow(this, node, &cont);
+ VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
}
void InstructionSelector::VisitInt32Mul(Node* node) {
@@ -1059,14 +1374,20 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
- if (g.CanBeImmediate(right, kInt32Imm) &&
+ if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
base::bits::IsPowerOfTwo32(g.GetImmediate(right))) {
int power = 31 - base::bits::CountLeadingZeros32(g.GetImmediate(right));
- Emit(kS390_ShiftLeft32, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.UseImmediate(power));
+ bool doZeroExt = !ZeroExtendsWord32ToWord64(left);
+ InstructionOperand dst =
+ (doZeroExt && CpuFeatures::IsSupported(DISTINCT_OPS))
+ ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node);
+
+ Emit(kS390_ShiftLeft32, dst, g.UseRegister(left), g.UseImmediate(power),
+ g.TempImmediate(doZeroExt));
return;
}
- VisitMul(this, node, kS390_Mul32);
+ VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
}
#if V8_TARGET_ARCH_S390X
@@ -1075,7 +1396,7 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Int64BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
- if (g.CanBeImmediate(right, kInt32Imm) &&
+ if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
@@ -1087,31 +1408,18 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
#endif
void InstructionSelector::VisitInt32MulHigh(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- if (g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- Emit(kS390_MulHigh32, g.DefineAsRegister(node), g.UseRegister(left),
- g.Use(right));
+ VisitBin32op(this, node, kS390_MulHigh32,
+ OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps);
}
void InstructionSelector::VisitUint32MulHigh(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- if (g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- Emit(kS390_MulHighU32, g.DefineAsRegister(node), g.UseRegister(left),
- g.Use(right));
+ VisitBin32op(this, node, kS390_MulHighU32,
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}
void InstructionSelector::VisitInt32Div(Node* node) {
- VisitRRR(this, kS390_Div32, node);
+ VisitBin32op(this, node, kS390_Div32,
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}
#if V8_TARGET_ARCH_S390X
@@ -1121,7 +1429,8 @@ void InstructionSelector::VisitInt64Div(Node* node) {
#endif
void InstructionSelector::VisitUint32Div(Node* node) {
- VisitRRR(this, kS390_DivU32, node);
+ VisitBin32op(this, node, kS390_DivU32,
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}
#if V8_TARGET_ARCH_S390X
@@ -1131,7 +1440,8 @@ void InstructionSelector::VisitUint64Div(Node* node) {
#endif
void InstructionSelector::VisitInt32Mod(Node* node) {
- VisitRRR(this, kS390_Mod32, node);
+ VisitBin32op(this, node, kS390_Mod32,
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}
#if V8_TARGET_ARCH_S390X
@@ -1141,7 +1451,8 @@ void InstructionSelector::VisitInt64Mod(Node* node) {
#endif
void InstructionSelector::VisitUint32Mod(Node* node) {
- VisitRRR(this, kS390_ModU32, node);
+ VisitBin32op(this, node, kS390_ModU32,
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}
#if V8_TARGET_ARCH_S390X
@@ -1205,7 +1516,13 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
- // TODO(mbrandy): inspect input to see if nop is appropriate.
+ S390OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (ZeroExtendsWord32ToWord64(value)) {
+    // These 32-bit operations implicitly zero-extend to 64-bit on s390x, so
+    // the zero-extension is a no-op.
+ return EmitIdentity(node);
+ }
VisitRR(this, kS390_Uint32ToUint64, node);
}
#endif
@@ -1411,46 +1728,46 @@ void InstructionSelector::VisitFloat64Neg(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ OperandModes mode = AddOperandMode;
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm,
- &cont);
+ return VisitBin32op(this, node, kS390_Add32, mode, &cont);
}
FlagsContinuation cont;
- VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm, &cont);
+ VisitBin32op(this, node, kS390_Add32, mode, &cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ OperandModes mode = SubOperandMode;
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32,
- kInt32Imm_Negate, &cont);
+ return VisitBin32op(this, node, kS390_Sub32, mode, &cont);
}
FlagsContinuation cont;
- VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate,
- &cont);
+ VisitBin32op(this, node, kS390_Sub32, mode, &cont);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm,
- &cont);
+ return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
+ OperandMode::kInt32Imm, &cont);
}
FlagsContinuation cont;
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm, &cont);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, OperandMode::kInt32Imm,
+ &cont);
}
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
- kInt32Imm_Negate, &cont);
+ OperandMode::kInt32Imm_Negate, &cont);
}
FlagsContinuation cont;
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate,
- &cont);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
+ OperandMode::kInt32Imm_Negate, &cont);
}
#endif
@@ -1480,8 +1797,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1491,60 +1808,193 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
}
}
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+ Node* value, InstructionCode opcode,
+ FlagsContinuation* cont);
+
+void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
+ Node* node, Node* value, FlagsContinuation* cont,
+ bool discard_output = false);
+
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont,
- bool commutative, ImmediateMode immediate_mode) {
+ OperandModes immediate_mode) {
S390OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- // Match immediates on left or right side of comparison.
- if (g.CanBeImmediate(right, immediate_mode)) {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
- cont);
- } else if (g.CanBeImmediate(left, immediate_mode)) {
- if (!commutative) cont->Commute();
- VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
- cont);
+ DCHECK(IrOpcode::IsComparisonOpcode(node->opcode()) ||
+ node->opcode() == IrOpcode::kInt32Sub ||
+ node->opcode() == IrOpcode::kInt64Sub);
+
+ InstructionOperand inputs[8];
+ InstructionOperand outputs[1];
+ size_t input_count = 0;
+ size_t output_count = 0;
+
+  // If one of the two inputs is an immediate or can serve as a memory
+  // operand, make sure it ends up on the right, where it can be matched
+  // below.
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
+
+ if ((!g.CanBeImmediate(right, immediate_mode) &&
+ g.CanBeImmediate(left, immediate_mode)) ||
+ (!g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
+ g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ std::swap(left, right);
+ }
+
+ // check if compare with 0
+ if (g.CanBeImmediate(right, immediate_mode) && g.GetImmediate(right) == 0) {
+ DCHECK(opcode == kS390_Cmp32 || opcode == kS390_Cmp64);
+ ArchOpcode load_and_test = (opcode == kS390_Cmp32)
+ ? kS390_LoadAndTestWord32
+ : kS390_LoadAndTestWord64;
+ return VisitLoadAndTest(selector, load_and_test, node, left, cont, true);
+ }
+
+ inputs[input_count++] = g.UseRegister(left);
+ if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
+ // generate memory operand
+ AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
+ right, inputs, &input_count, OpcodeImmMode(opcode));
+ opcode |= AddressingModeField::encode(addressing_mode);
+ } else if (g.CanBeImmediate(right, immediate_mode)) {
+ inputs[input_count++] = g.UseImmediate(right);
+ } else {
+ inputs[input_count++] = g.UseAnyExceptImmediate(right);
+ }
+
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ } else if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ } else {
+ DCHECK(cont->IsDeoptimize());
+ // nothing to do
+ }
+
+ DCHECK(input_count <= 8 && output_count <= 1);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+ cont->kind(), cont->reason(), cont->frame_state());
} else {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
- cont);
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
}
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kInt32Imm);
- VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
+ OperandModes mode =
+ (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
+ VisitWordCompare(selector, node, kS390_Cmp32, cont, mode);
}
#if V8_TARGET_ARCH_S390X
void VisitWord64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kUint32Imm);
- VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
+ OperandModes mode =
+ (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
+ VisitWordCompare(selector, node, kS390_Cmp64, cont, mode);
}
#endif
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- S390OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kS390_CmpFloat, g.UseRegister(left),
- g.UseRegister(right), cont);
+ VisitWordCompare(selector, node, kS390_CmpFloat, cont, OperandMode::kNone);
}
// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kS390_CmpDouble, cont, OperandMode::kNone);
+}
+
+void VisitTestUnderMask(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ DCHECK(node->opcode() == IrOpcode::kWord32And ||
+ node->opcode() == IrOpcode::kWord64And);
+ ArchOpcode opcode =
+ (node->opcode() == IrOpcode::kWord32And) ? kS390_Tst32 : kS390_Tst64;
S390OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- VisitCompare(selector, kS390_CmpDouble, g.UseRegister(left),
- g.UseRegister(right), cont);
+ if (!g.CanBeImmediate(right, OperandMode::kUint32Imm) &&
+ g.CanBeImmediate(left, OperandMode::kUint32Imm)) {
+ std::swap(left, right);
+ }
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseOperand(right, OperandMode::kUint32Imm), cont);
+}
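+
+// Editor's note on VisitTestUnderMask() above (assumption): kS390_Tst32 and
+// kS390_Tst64 emit a flag-setting test of the bits selected by the mask, so
+// a Word32And/Word64And whose only use is a comparison with zero folds into
+// a single test instead of materializing the AND result.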
+
+void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
+ Node* node, Node* value, FlagsContinuation* cont,
+ bool discard_output) {
+  static_assert(kS390_LoadAndTestFloat64 - kS390_LoadAndTestWord32 == 3,
+                "The LoadAndTest opcode range must not contain other opcodes.");
+
+ // TODO(john.yan): Add support for Float32/Float64.
+  DCHECK(opcode >= kS390_LoadAndTestWord32 &&
+         opcode <= kS390_LoadAndTestWord64);
+
+ S390OperandGenerator g(selector);
+ InstructionOperand inputs[8];
+ InstructionOperand outputs[2];
+ size_t input_count = 0;
+ size_t output_count = 0;
+ bool use_value = false;
+
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
+
+ if (g.CanBeMemoryOperand(opcode, node, value, effect_level)) {
+ // generate memory operand
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ } else {
+ inputs[input_count++] = g.UseAnyExceptImmediate(value);
+ use_value = true;
+ }
+
+ if (!discard_output && !use_value) {
+ outputs[output_count++] = g.DefineAsRegister(value);
+ }
+
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ } else if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ } else {
+ DCHECK(cont->IsDeoptimize());
+ // nothing to do
+ }
+
+ DCHECK(input_count <= 8 && output_count <= 2);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->kind(), cont->reason(), cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
// Shared routine for word comparisons against zero.
@@ -1562,6 +2012,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
+ FlagsCondition fc = cont->condition();
if (selector->CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
@@ -1576,8 +2027,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32Sub:
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kS390_Tst64, cont,
- true, kUint32Imm);
+ return VisitTestUnderMask(selector, value, cont);
default:
break;
}
@@ -1610,8 +2060,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt64Sub:
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kWord64And:
- return VisitWordCompare(selector, value, kS390_Tst64, cont,
- true, kUint32Imm);
+ return VisitTestUnderMask(selector, value, cont);
default:
break;
}
@@ -1665,24 +2114,28 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(
- selector, node, kS390_Add32, kInt32Imm, cont);
+ return VisitBin32op(selector, node, kS390_Add32, AddOperandMode,
+ cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(
- selector, node, kS390_Sub32, kInt32Imm_Negate, cont);
+ return VisitBin32op(selector, node, kS390_Sub32, SubOperandMode,
+ cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kNotEqual);
- return EmitInt32MulWithOverflow(selector, node, cont);
+ return VisitBin32op(
+ selector, node, kS390_Mul32WithOverflow,
+ OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
+ cont);
#if V8_TARGET_ARCH_S390X
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int64BinopMatcher>(
- selector, node, kS390_Add64, kInt32Imm, cont);
+ selector, node, kS390_Add64, OperandMode::kInt32Imm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int64BinopMatcher>(
- selector, node, kS390_Sub64, kInt32Imm_Negate, cont);
+ selector, node, kS390_Sub64, OperandMode::kInt32Imm_Negate,
+ cont);
#endif
default:
break;
@@ -1691,53 +2144,77 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Sub:
- return VisitWord32Compare(selector, value, cont);
+ if (fc == kNotEqual || fc == kEqual)
+ return VisitWord32Compare(selector, value, cont);
+ break;
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
- kUint32Imm);
-// TODO(mbrandy): Handle?
-// case IrOpcode::kInt32Add:
-// case IrOpcode::kWord32Or:
-// case IrOpcode::kWord32Xor:
-// case IrOpcode::kWord32Sar:
-// case IrOpcode::kWord32Shl:
-// case IrOpcode::kWord32Shr:
-// case IrOpcode::kWord32Ror:
+ return VisitTestUnderMask(selector, value, cont);
+ case IrOpcode::kLoad: {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord32:
+ if (opcode == kS390_LoadAndTestWord32) {
+ return VisitLoadAndTest(selector, opcode, user, value, cont);
+ }
+ default:
+ break;
+ }
+ break;
+ }
+ case IrOpcode::kInt32Add:
+        // Can't handle the overflow case.
+ break;
+ case IrOpcode::kWord32Or:
+ return VisitBin32op(selector, value, kS390_Or32, OrOperandMode, cont);
+ case IrOpcode::kWord32Xor:
+ return VisitBin32op(selector, value, kS390_Xor32, XorOperandMode, cont);
+ case IrOpcode::kWord32Sar:
+ case IrOpcode::kWord32Shl:
+ case IrOpcode::kWord32Shr:
+ case IrOpcode::kWord32Ror:
+        // Doesn't set the condition code, so ignore.
+ break;
#if V8_TARGET_ARCH_S390X
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(selector, value, cont);
+ if (fc == kNotEqual || fc == kEqual)
+ return VisitWord64Compare(selector, value, cont);
+ break;
case IrOpcode::kWord64And:
- return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
- kUint32Imm);
-// TODO(mbrandy): Handle?
-// case IrOpcode::kInt64Add:
-// case IrOpcode::kWord64Or:
-// case IrOpcode::kWord64Xor:
-// case IrOpcode::kWord64Sar:
-// case IrOpcode::kWord64Shl:
-// case IrOpcode::kWord64Shr:
-// case IrOpcode::kWord64Ror:
+ return VisitTestUnderMask(selector, value, cont);
+ case IrOpcode::kInt64Add:
+        // Can't handle the overflow case.
+ break;
+ case IrOpcode::kWord64Or:
+ // TODO(john.yan): need to handle
+ break;
+ case IrOpcode::kWord64Xor:
+ // TODO(john.yan): need to handle
+ break;
+ case IrOpcode::kWord64Sar:
+ case IrOpcode::kWord64Shl:
+ case IrOpcode::kWord64Shr:
+ case IrOpcode::kWord64Ror:
+        // Doesn't set the condition code, so ignore.
+ break;
#endif
default:
break;
}
}
- // Branch could not be combined with a compare, emit compare against 0.
- S390OperandGenerator g(selector);
- VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
- cont);
+ // Branch could not be combined with a compare, emit LoadAndTest
+ VisitLoadAndTest(selector, opcode, user, value, cont, true);
}
void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
- VisitWordCompareZero(selector, user, value, kS390_Cmp32, cont);
+ VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord32, cont);
}
#if V8_TARGET_ARCH_S390X
void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
- VisitWordCompareZero(selector, user, value, kS390_Cmp64, cont);
+ VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord64, cont);
}
#endif
@@ -1750,14 +2227,16 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1790,9 +2269,14 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
- Emit(kS390_Sub32, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
}
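+    // Editor's note: the table lookup below indexes with a full 64-bit
+    // register on s390x, so the 32-bit index is explicitly zero-extended
+    // first.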
+#if V8_TARGET_ARCH_S390X
+ InstructionOperand index_operand_zero_ext = g.TempRegister();
+ Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
+ index_operand = index_operand_zero_ext;
+#endif
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index a11d8bc4cc..19ffe93775 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -9,6 +9,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
+#include "src/objects-inl.h"
#include "src/wasm/wasm-module.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index c9fda35b36..4acc77f22f 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -138,7 +138,10 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
return UseInfo::TruncatingWord32();
case MachineRepresentation::kBit:
return UseInfo::Bool();
- case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kSimd1x4:
+ case MachineRepresentation::kSimd1x8:
+ case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
@@ -170,6 +173,7 @@ void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
}
void ChangeToPureOp(Node* node, const Operator* new_op) {
+ DCHECK(new_op->HasProperty(Operator::kPure));
if (node->op()->EffectInputCount() > 0) {
DCHECK_LT(0, node->op()->ControlInputCount());
// Disconnect the node from effect and control chains.
@@ -894,6 +898,7 @@ class RepresentationSelector {
// Helper for handling selects.
void VisitSelect(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
+ DCHECK(TypeOf(node->InputAt(0))->Is(Type::Boolean()));
ProcessInput(node, 0, UseInfo::Bool());
MachineRepresentation output =
@@ -980,7 +985,7 @@ class RepresentationSelector {
}
}
- MachineSemantic DeoptValueSemanticOf(Type* type) {
+ static MachineSemantic DeoptValueSemanticOf(Type* type) {
// We only need signedness to do deopt correctly.
if (type->Is(Type::Signed32())) {
return MachineSemantic::kInt32;
@@ -991,6 +996,29 @@ class RepresentationSelector {
}
}
+ static MachineType DeoptMachineTypeOf(MachineRepresentation rep, Type* type) {
+ if (!type->IsInhabited()) {
+ return MachineType::None();
+ }
+ // TODO(turbofan): Special treatment for ExternalPointer here,
+ // to avoid incompatible truncations. We really need a story
+ // for the JSFunction::entry field.
+ if (type->Is(Type::ExternalPointer())) {
+ return MachineType::Pointer();
+ }
+ // Do not distinguish between various Tagged variations.
+ if (IsAnyTagged(rep)) {
+ return MachineType::AnyTagged();
+ }
+ MachineType machine_type(rep, DeoptValueSemanticOf(type));
+ DCHECK(machine_type.representation() != MachineRepresentation::kWord32 ||
+ machine_type.semantic() == MachineSemantic::kInt32 ||
+ machine_type.semantic() == MachineSemantic::kUint32);
+ DCHECK(machine_type.representation() != MachineRepresentation::kBit ||
+ type->Is(Type::Boolean()));
+ return machine_type;
+ }
+
void VisitStateValues(Node* node) {
if (propagate()) {
for (int i = 0; i < node->InputCount(); i++) {
@@ -1003,17 +1031,8 @@ class RepresentationSelector {
ZoneVector<MachineType>(node->InputCount(), zone);
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
- NodeInfo* input_info = GetInfo(input);
- Type* input_type = TypeOf(input);
- MachineRepresentation rep = input_type->IsInhabited()
- ? input_info->representation()
- : MachineRepresentation::kNone;
- MachineType machine_type(rep, DeoptValueSemanticOf(input_type));
- DCHECK(machine_type.representation() !=
- MachineRepresentation::kWord32 ||
- machine_type.semantic() == MachineSemantic::kInt32 ||
- machine_type.semantic() == MachineSemantic::kUint32);
- (*types)[i] = machine_type;
+ (*types)[i] =
+ DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
}
SparseInputMask mask = SparseInputMaskOf(node->op());
NodeProperties::ChangeOp(
@@ -1047,28 +1066,8 @@ class RepresentationSelector {
ZoneVector<MachineType>(node->InputCount(), zone);
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
- NodeInfo* input_info = GetInfo(input);
- Type* input_type = TypeOf(input);
- // TODO(turbofan): Special treatment for ExternalPointer here,
- // to avoid incompatible truncations. We really need a story
- // for the JSFunction::entry field.
- if (!input_type->IsInhabited()) {
- (*types)[i] = MachineType::None();
- } else if (input_type->Is(Type::ExternalPointer())) {
- (*types)[i] = MachineType::Pointer();
- } else {
- MachineRepresentation rep = input_type->IsInhabited()
- ? input_info->representation()
- : MachineRepresentation::kNone;
- MachineType machine_type(rep, DeoptValueSemanticOf(input_type));
- DCHECK(machine_type.representation() !=
- MachineRepresentation::kWord32 ||
- machine_type.semantic() == MachineSemantic::kInt32 ||
- machine_type.semantic() == MachineSemantic::kUint32);
- DCHECK(machine_type.representation() != MachineRepresentation::kBit ||
- input_type->Is(Type::Boolean()));
- (*types)[i] = machine_type;
- }
+ (*types)[i] =
+ DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
}
NodeProperties::ChangeOp(node,
jsgraph_->common()->TypedObjectState(types));
@@ -1192,8 +1191,11 @@ class RepresentationSelector {
// ToNumber(x) can throw if x is either a Receiver or a Symbol, so we can
// only eliminate an unused speculative number operation if we know that
     // the inputs are PlainPrimitive, which excludes everything that might
- // have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
+  // have side effects or throw during a ToNumber conversion. We are only
+ // allowed to perform a number addition if neither input is a String, even
+ // if the value is never used, so we further limit to NumberOrOddball in
+ // order to explicitly exclude String inputs.
+ if (BothInputsAre(node, Type::NumberOrOddball())) {
if (truncation.IsUnused()) return VisitUnused(node);
}
@@ -1430,10 +1432,12 @@ class RepresentationSelector {
return;
}
- case IrOpcode::kBranch:
+ case IrOpcode::kBranch: {
+ DCHECK(TypeOf(node->InputAt(0))->Is(Type::Boolean()));
ProcessInput(node, 0, UseInfo::Bool());
EnqueueInput(node, NodeProperties::FirstControlIndex(node));
return;
+ }
case IrOpcode::kSwitch:
ProcessInput(node, 0, UseInfo::TruncatingWord32());
EnqueueInput(node, NodeProperties::FirstControlIndex(node));
@@ -1982,8 +1986,26 @@ class RepresentationSelector {
if (BothInputsAre(node, Type::PlainPrimitive())) {
if (truncation.IsUnused()) return VisitUnused(node);
}
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ Type* rhs_type = GetUpperBound(node->InputAt(1));
+ if (rhs_type->Is(type_cache_.kZeroish) &&
+ (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) &&
+ !truncation.IsUsedAsWord32()) {
+ // The SignedSmall or Signed32 feedback means that the results that we
+ // have seen so far were of type Unsigned31. We speculate that this
+ // will continue to hold. Moreover, since the RHS is 0, the result
+ // will just be the (converted) LHS.
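+          // (Editor's example: a speculative "x >>> 0" with Signed32
+          // feedback reduces to a checked conversion of x alone, since a
+          // shift by zero is the identity.)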
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Unsigned31());
+ if (lower()) {
+ node->RemoveInput(1);
+ NodeProperties::ChangeOp(node,
+ simplified()->CheckedUint32ToInt32());
+ }
+ return;
+ }
if (BothInputsAre(node, Type::NumberOrOddball())) {
- Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
@@ -1992,8 +2014,6 @@ class RepresentationSelector {
}
return;
}
- NumberOperationHint hint = NumberOperationHintOf(node->op());
- Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
MachineRepresentation::kWord32, Type::Unsigned32());
if (lower()) {
@@ -2240,6 +2260,13 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kStringIndexOf: {
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ ProcessInput(node, 1, UseInfo::AnyTagged());
+ ProcessInput(node, 2, UseInfo::TaggedSigned());
+ SetOutput(node, MachineRepresentation::kTaggedSigned);
+ return;
+ }
case IrOpcode::kCheckBounds: {
Type* index_type = TypeOf(node->InputAt(0));
@@ -2282,7 +2309,7 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(node, UseInfo::AnyTagged(),
+ VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
MachineRepresentation::kTaggedPointer);
}
return;
@@ -2306,6 +2333,17 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kCheckReceiver: {
+ if (InputIs(node, Type::Receiver())) {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+ MachineRepresentation::kTaggedPointer);
+ }
+ return;
+ }
case IrOpcode::kCheckSmi: {
if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
VisitUnop(node, UseInfo::CheckedSignedSmallAsWord32(),
@@ -2323,7 +2361,7 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(node, UseInfo::AnyTagged(),
+ VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
MachineRepresentation::kTaggedPointer);
}
return;
@@ -2503,9 +2541,12 @@ class RepresentationSelector {
}
return;
}
- case IrOpcode::kObjectIsCallable: {
- // TODO(turbofan): Add Type::Callable to optimize this?
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ case IrOpcode::kObjectIsDetectableCallable: {
+ VisitObjectIs(node, Type::DetectableCallable(), lowering);
+ return;
+ }
+ case IrOpcode::kObjectIsNonCallable: {
+ VisitObjectIs(node, Type::NonCallable(), lowering);
return;
}
case IrOpcode::kObjectIsNumber: {
@@ -2540,7 +2581,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckFloat64Hole: {
- if (truncation.IsUnused()) return VisitUnused(node);
CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
ProcessInput(node, 0, UseInfo::TruncatingFloat64());
ProcessRemainingInputs(node, 1);
@@ -2647,6 +2687,7 @@ class RepresentationSelector {
case IrOpcode::kBeginRegion:
case IrOpcode::kProjection:
case IrOpcode::kOsrValue:
+ case IrOpcode::kArgumentsObjectState:
// All JavaScript operators except JSToNumber have uniform handling.
#define OPCODE_CASE(name) case IrOpcode::k##name:
JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
@@ -3361,12 +3402,11 @@ void SimplifiedLowering::DoMin(Node* node, Operator const* op,
void SimplifiedLowering::DoShift(Node* node, Operator const* op,
Type* rhs_type) {
- Node* const rhs = NodeProperties::GetValueInput(node, 1);
if (!rhs_type->Is(type_cache_.kZeroToThirtyOne)) {
+ Node* const rhs = NodeProperties::GetValueInput(node, 1);
node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
jsgraph()->Int32Constant(0x1f)));
}
- DCHECK(op->HasProperty(Operator::kPure));
ChangeToPureOp(node, op);
}
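
The rewritten DoShift inserts the Word32And only when the shift count is not statically known to lie in [0, 31]. A runnable sketch of the JavaScript shift-count semantics it implements:

    // Standalone sketch (plain C++, not V8 source): JS takes shift counts
    // modulo 32, which the lowering realizes as rhs & 0x1f.
    #include <cassert>
    #include <cstdint>

    uint32_t JsShiftRightLogical(uint32_t lhs, uint32_t count) {
      return lhs >> (count & 0x1f);
    }

    int main() {
      assert(JsShiftRightLogical(256u, 40u) == 1u);  // 40 & 0x1f == 8
    }
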
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 31dac61d7e..90a4e344d8 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -461,6 +461,7 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(StringCharAt, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
+ V(StringIndexOf, Operator::kNoProperties, 3, 0) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
@@ -468,6 +469,7 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeFloat64ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
@@ -478,7 +480,8 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
@@ -502,6 +505,7 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(CheckIf, 1, 0) \
V(CheckInternalizedString, 1, 1) \
V(CheckNumber, 1, 1) \
+ V(CheckReceiver, 1, 1) \
V(CheckSmi, 1, 1) \
V(CheckString, 1, 1) \
V(CheckTaggedHole, 1, 1) \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 4ad44354f8..ff3f60a423 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -356,6 +356,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringCharCodeAt();
const Operator* StringFromCharCode();
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
+ const Operator* StringIndexOf();
const Operator* PlainPrimitiveToNumber();
const Operator* PlainPrimitiveToWord32();
@@ -365,6 +366,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ChangeTaggedToInt32();
const Operator* ChangeTaggedToUint32();
const Operator* ChangeTaggedToFloat64();
+ const Operator* ChangeTaggedToTaggedSigned();
const Operator* ChangeInt31ToTaggedSigned();
const Operator* ChangeInt32ToTagged();
const Operator* ChangeUint32ToTagged();
@@ -385,6 +387,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckNumber();
const Operator* CheckSmi();
const Operator* CheckString();
+ const Operator* CheckReceiver();
const Operator* CheckedInt32Add();
const Operator* CheckedInt32Sub();
@@ -408,7 +411,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckTaggedHole();
const Operator* ConvertTaggedHoleToUndefined();
- const Operator* ObjectIsCallable();
+ const Operator* ObjectIsDetectableCallable();
+ const Operator* ObjectIsNonCallable();
const Operator* ObjectIsNumber();
const Operator* ObjectIsReceiver();
const Operator* ObjectIsSmi();
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index 61c71caf87..899c91af85 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -137,7 +137,8 @@ Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count,
SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
WorkingBuffer* node_buffer, size_t* node_count, size_t* values_idx,
- Node** values, size_t count, const BitVector* liveness) {
+ Node** values, size_t count, const BitVector* liveness,
+ int liveness_offset) {
SparseInputMask::BitMaskType input_mask = 0;
// Virtual nodes are the live nodes plus the implicit optimized out nodes,
@@ -149,7 +150,7 @@ SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
DCHECK_LE(*values_idx, static_cast<size_t>(INT_MAX));
if (liveness == nullptr ||
- liveness->Contains(static_cast<int>(*values_idx))) {
+ liveness->Contains(liveness_offset + static_cast<int>(*values_idx))) {
input_mask |= 1 << (virtual_node_count);
(*node_buffer)[(*node_count)++] = values[*values_idx];
}
@@ -169,14 +170,14 @@ SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
size_t count, const BitVector* liveness,
- size_t level) {
+ int liveness_offset, size_t level) {
WorkingBuffer* node_buffer = GetWorkingSpace(level);
size_t node_count = 0;
SparseInputMask::BitMaskType input_mask = SparseInputMask::kDenseBitMask;
if (level == 0) {
input_mask = FillBufferWithValues(node_buffer, &node_count, values_idx,
- values, count, liveness);
+ values, count, liveness, liveness_offset);
// Make sure we returned a sparse input mask.
DCHECK_NE(input_mask, SparseInputMask::kDenseBitMask);
} else {
@@ -188,8 +189,9 @@ Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
// remaining live nodes.
size_t previous_input_count = node_count;
- input_mask = FillBufferWithValues(node_buffer, &node_count, values_idx,
- values, count, liveness);
+ input_mask =
+ FillBufferWithValues(node_buffer, &node_count, values_idx, values,
+ count, liveness, liveness_offset);
// Make sure we have exhausted our values.
DCHECK_EQ(*values_idx, count);
// Make sure we returned a sparse input mask.
@@ -205,8 +207,8 @@ Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
} else {
// Otherwise, add the values to a subtree and add that as an input.
- Node* subtree =
- BuildTree(values_idx, values, count, liveness, level - 1);
+ Node* subtree = BuildTree(values_idx, values, count, liveness,
+ liveness_offset, level - 1);
(*node_buffer)[node_count++] = subtree;
// Don't touch the bitmask, so that it stays dense.
}
@@ -229,7 +231,7 @@ Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
namespace {
void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
- const BitVector* liveness) {
+ const BitVector* liveness, int liveness_offset) {
CHECK_EQ(count, StateValuesAccess(tree).size());
int i;
@@ -237,7 +239,7 @@ void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
auto it = access.begin();
auto itend = access.end();
for (i = 0; it != itend; ++it, ++i) {
- if (liveness == nullptr || liveness->Contains(i)) {
+ if (liveness == nullptr || liveness->Contains(liveness_offset + i)) {
CHECK((*it).node == values[i]);
} else {
CHECK((*it).node == nullptr);
@@ -250,7 +252,8 @@ void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
#endif
Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
- const BitVector* liveness) {
+ const BitVector* liveness,
+ int liveness_offset) {
#if DEBUG
// Check that the values represent actual values, and not a tree of values.
for (size_t i = 0; i < count; i++) {
@@ -260,12 +263,10 @@ Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
}
}
if (liveness != nullptr) {
- // Liveness can have extra bits for the stack or accumulator, which we
- // ignore here.
- DCHECK_LE(count, static_cast<size_t>(liveness->length()));
+ DCHECK_LE(liveness_offset + count, static_cast<size_t>(liveness->length()));
for (size_t i = 0; i < count; i++) {
- if (liveness->Contains(static_cast<int>(i))) {
+ if (liveness->Contains(liveness_offset + static_cast<int>(i))) {
DCHECK_NOT_NULL(values[i]);
}
}
@@ -288,7 +289,8 @@ Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
}
size_t values_idx = 0;
- Node* tree = BuildTree(&values_idx, values, count, liveness, height);
+ Node* tree =
+ BuildTree(&values_idx, values, count, liveness, liveness_offset, height);
// The values should be exhausted by the end of BuildTree.
DCHECK_EQ(values_idx, count);
@@ -296,7 +298,7 @@ Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
DCHECK_EQ(tree->opcode(), IrOpcode::kStateValues);
#if DEBUG
- CheckTreeContainsValues(tree, values, count, liveness);
+ CheckTreeContainsValues(tree, values, count, liveness, liveness_offset);
#endif
return tree;
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index d5e84d208c..e1fd7d287c 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -24,7 +24,8 @@ class V8_EXPORT_PRIVATE StateValuesCache {
explicit StateValuesCache(JSGraph* js_graph);
Node* GetNodeForValues(Node** values, size_t count,
- const BitVector* liveness = nullptr);
+ const BitVector* liveness = nullptr,
+ int liveness_offset = 0);
private:
static const size_t kMaxInputCount = 8;
@@ -58,10 +59,11 @@ class V8_EXPORT_PRIVATE StateValuesCache {
size_t* node_count,
size_t* values_idx,
Node** values, size_t count,
- const BitVector* liveness);
+ const BitVector* liveness,
+ int liveness_offset);
Node* BuildTree(size_t* values_idx, Node** values, size_t count,
- const BitVector* liveness, size_t level);
+ const BitVector* liveness, int liveness_offset, size_t level);
WorkingBuffer* GetWorkingSpace(size_t level);
Node* GetEmptyStateValues();
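
The new liveness_offset parameter lets one shared liveness bitvector be indexed from different starting positions. A sketch of the indexing convention, mirroring the Contains calls in the .cc hunk above (BitVector is the existing V8 utility type):

    // Sketch: value i is live iff bit (liveness_offset + i) is set; a null
    // bitvector means everything is live, and offset 0 is the old behavior.
    bool IsLiveAt(const BitVector* liveness, int liveness_offset, size_t i) {
      return liveness == nullptr ||
             liveness->Contains(liveness_offset + static_cast<int>(i));
    }
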
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index 8149a1bee4..e130a10e4e 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -92,6 +92,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
return ReduceNumberToUint8Clamped(node);
case IrOpcode::kPhi:
return ReducePhi(node);
+ case IrOpcode::kReferenceEqual:
+ return ReduceReferenceEqual(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
default:
@@ -258,6 +260,18 @@ Reduction TypedOptimization::ReducePhi(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceReferenceEqual(Node* node) {
+ DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
+ Node* const lhs = NodeProperties::GetValueInput(node, 0);
+ Node* const rhs = NodeProperties::GetValueInput(node, 1);
+ Type* const lhs_type = NodeProperties::GetType(lhs);
+ Type* const rhs_type = NodeProperties::GetType(rhs);
+ if (!lhs_type->Maybe(rhs_type)) {
+ return Replace(jsgraph()->FalseConstant());
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceSelect(Node* node) {
DCHECK_EQ(IrOpcode::kSelect, node->opcode());
Node* const condition = NodeProperties::GetValueInput(node, 0);
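
The new ReduceReferenceEqual folds the comparison when the static types are provably disjoint. A sketch of that oracle in isolation (hypothetical helper; Type::Maybe is the intersection test used in the hunk above):

    // Sketch: disjoint static types can never hold the same reference,
    // e.g. lhs : String against rhs : Number folds to false.
    enum class StaticEq { kDefinitelyFalse, kUnknown };
    StaticEq StaticReferenceEqual(Type* lhs_type, Type* rhs_type) {
      return lhs_type->Maybe(rhs_type) ? StaticEq::kUnknown
                                       : StaticEq::kDefinitelyFalse;
    }
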
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index 810914993f..93de680d4f 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -50,6 +50,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceNumberRoundop(Node* node);
Reduction ReduceNumberToUint8Clamped(Node* node);
Reduction ReducePhi(Node* node);
+ Reduction ReduceReferenceEqual(Node* node);
Reduction ReduceSelect(Node* node);
CompilationDependencies* dependencies() const { return dependencies_; }
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 51b8352b31..ed1a04aa3b 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -43,13 +43,14 @@ Typer::Typer(Isolate* isolate, Flags flags, Graph* graph)
Zone* zone = this->zone();
Factory* const factory = isolate->factory();
- singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
- singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
- singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
+ singleton_empty_string_ = Type::HeapConstant(factory->empty_string(), zone);
+ singleton_false_ = operation_typer_.singleton_false();
+ singleton_true_ = operation_typer_.singleton_true();
falsish_ = Type::Union(
Type::Undetectable(),
Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
- singleton_the_hole_, zone),
+ Type::Union(singleton_empty_string_, Type::Hole(), zone),
+ zone),
zone);
truish_ = Type::Union(
singleton_true_,
@@ -283,7 +284,8 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
- static Type* ObjectIsCallable(Type*, Typer*);
+ static Type* ObjectIsDetectableCallable(Type*, Typer*);
+ static Type* ObjectIsNonCallable(Type*, Typer*);
static Type* ObjectIsNumber(Type*, Typer*);
static Type* ObjectIsReceiver(Type*, Typer*);
static Type* ObjectIsSmi(Type*, Typer*);
@@ -296,7 +298,7 @@ class Typer::Visitor : public Reducer {
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
- static Type* JSCallFunctionTyper(Type*, Typer*);
+ static Type* JSCallTyper(Type*, Typer*);
static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
static Type* StringFromCharCodeTyper(Type*, Typer*);
@@ -501,9 +503,15 @@ Type* Typer::Visitor::ToString(Type* type, Typer* t) {
// Type checks.
-Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
- if (type->Is(Type::Function())) return t->singleton_true_;
- if (type->Is(Type::Primitive())) return t->singleton_false_;
+Type* Typer::Visitor::ObjectIsDetectableCallable(Type* type, Typer* t) {
+ if (type->Is(Type::DetectableCallable())) return t->singleton_true_;
+ if (!type->Maybe(Type::DetectableCallable())) return t->singleton_false_;
+ return Type::Boolean();
+}
+
+Type* Typer::Visitor::ObjectIsNonCallable(Type* type, Typer* t) {
+ if (type->Is(Type::NonCallable())) return t->singleton_true_;
+ if (!type->Maybe(Type::NonCallable())) return t->singleton_false_;
return Type::Boolean();
}
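
Both new predicates follow the same three-valued pattern. A generic sketch (hypothetical helper over the Typer fields used above):

    // Sketch: fold to true on containment, to false on disjointness,
    // otherwise the check can go either way at runtime.
    Type* TypeCheckLattice(Type* input, Type* target, Typer* t) {
      if (input->Is(target)) return t->singleton_true_;
      if (!input->Maybe(target)) return t->singleton_false_;
      return Type::Boolean();
    }
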
@@ -826,6 +834,10 @@ Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
return Type::Internal();
}
+Type* Typer::Visitor::TypeArgumentsObjectState(Node* node) {
+ return Type::Internal();
+}
+
Type* Typer::Visitor::TypeObjectState(Node* node) { return Type::Internal(); }
Type* Typer::Visitor::TypeTypedObjectState(Node* node) {
@@ -897,8 +909,7 @@ Type* Typer::Visitor::JSStrictEqualTyper(Type* lhs, Type* rhs, Typer* t) {
(lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
return t->singleton_false_;
}
- if ((lhs->Is(t->singleton_the_hole_) || rhs->Is(t->singleton_the_hole_)) &&
- !lhs->Maybe(rhs)) {
+ if ((lhs->Is(Type::Hole()) || rhs->Is(Type::Hole())) && !lhs->Maybe(rhs)) {
return t->singleton_false_;
}
if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
@@ -1045,6 +1056,9 @@ Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
// JS unary operators.
+Type* Typer::Visitor::TypeJSClassOf(Node* node) {
+ return Type::InternalizedStringOrNull();
+}
Type* Typer::Visitor::TypeJSTypeOf(Node* node) {
return Type::InternalizedString();
@@ -1237,6 +1251,11 @@ Type* Typer::Visitor::TypeJSStoreGlobal(Node* node) {
return nullptr;
}
+Type* Typer::Visitor::TypeJSStoreNamedOwn(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
Type* Typer::Visitor::TypeJSStoreDataPropertyInLiteral(Node* node) {
UNREACHABLE();
return nullptr;
@@ -1308,16 +1327,13 @@ Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
// JS other operators.
+Type* Typer::Visitor::TypeJSConstruct(Node* node) { return Type::Receiver(); }
-Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
+Type* Typer::Visitor::TypeJSConstructWithSpread(Node* node) {
return Type::Receiver();
}
-Type* Typer::Visitor::TypeJSCallConstructWithSpread(Node* node) {
- return Type::Receiver();
-}
-
-Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
+Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
if (fun->IsHeapConstant() && fun->AsHeapConstant()->Value()->IsJSFunction()) {
Handle<JSFunction> function =
Handle<JSFunction>::cast(fun->AsHeapConstant()->Value());
@@ -1450,6 +1466,8 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return Type::OtherObject();
// Array functions.
+ case kArrayIsArray:
+ return Type::Boolean();
case kArrayConcat:
return Type::Receiver();
case kArrayEvery:
@@ -1484,8 +1502,13 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return t->cache_.kPositiveSafeInteger;
// Object functions.
+ case kObjectAssign:
+ case kObjectCreate:
+ return Type::OtherObject();
case kObjectHasOwnProperty:
return Type::Boolean();
+ case kObjectToString:
+ return Type::String();
// RegExp functions.
case kRegExpCompile:
@@ -1512,6 +1535,46 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
case kGlobalIsFinite:
case kGlobalIsNaN:
return Type::Boolean();
+
+ // Map functions.
+ case kMapClear:
+ case kMapForEach:
+ return Type::Undefined();
+ case kMapDelete:
+ case kMapHas:
+ return Type::Boolean();
+ case kMapEntries:
+ case kMapKeys:
+ case kMapSet:
+ case kMapValues:
+ return Type::OtherObject();
+
+ // Set functions.
+ case kSetAdd:
+ case kSetEntries:
+ case kSetKeys:
+ case kSetValues:
+ return Type::OtherObject();
+ case kSetClear:
+ case kSetForEach:
+ return Type::Undefined();
+ case kSetDelete:
+ case kSetHas:
+ return Type::Boolean();
+
+ // WeakMap functions.
+ case kWeakMapDelete:
+ case kWeakMapHas:
+ return Type::Boolean();
+ case kWeakMapSet:
+ return Type::OtherObject();
+
+ // WeakSet functions.
+ case kWeakSetAdd:
+ return Type::OtherObject();
+ case kWeakSetDelete:
+ case kWeakSetHas:
+ return Type::Boolean();
default:
break;
}
@@ -1520,13 +1583,19 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return Type::NonInternal();
}
+Type* Typer::Visitor::TypeJSCallForwardVarargs(Node* node) {
+ return TypeUnaryOp(node, JSCallTyper);
+}
-Type* Typer::Visitor::TypeJSCallFunction(Node* node) {
+Type* Typer::Visitor::TypeJSCall(Node* node) {
// TODO(bmeurer): We could infer better types if we wouldn't ignore the
- // argument types for the JSCallFunctionTyper above.
- return TypeUnaryOp(node, JSCallFunctionTyper);
+ // argument types for the JSCallTyper above.
+ return TypeUnaryOp(node, JSCallTyper);
}
+Type* Typer::Visitor::TypeJSCallWithSpread(Node* node) {
+ return TypeUnaryOp(node, JSCallTyper);
+}
Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
switch (CallRuntimeParametersOf(node->op()).id()) {
@@ -1554,6 +1623,8 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
return TypeUnaryOp(node, ToObject);
case Runtime::kInlineToString:
return TypeUnaryOp(node, ToString);
+ case Runtime::kInlineClassOf:
+ return Type::InternalizedStringOrNull();
case Runtime::kHasInPrototypeChain:
return Type::Boolean();
default:
@@ -1572,7 +1643,7 @@ Type* Typer::Visitor::TypeJSConvertReceiver(Node* node) {
Type* Typer::Visitor::TypeJSForInNext(Node* node) {
- return Type::Union(Type::Name(), Type::Undefined(), zone());
+ return Type::Union(Type::String(), Type::Undefined(), zone());
}
@@ -1616,6 +1687,8 @@ Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeJSDebugger(Node* node) { return Type::Any(); }
+
// Simplified operators.
Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
@@ -1695,6 +1768,10 @@ Type* Typer::Visitor::TypeStringFromCodePoint(Node* node) {
return TypeUnaryOp(node, StringFromCodePointTyper);
}
+Type* Typer::Visitor::TypeStringIndexOf(Node* node) {
+ return Type::Range(-1.0, String::kMaxLength - 1.0, zone());
+}
+
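
The range assigned above matches the JavaScript contract: -1 when the needle is absent, otherwise a valid index. A runnable analogue for a non-empty needle:

    // Standalone sketch (plain C++, not V8 source):
    #include <cassert>
    #include <string>

    int IndexOf(const std::string& s, const std::string& needle, size_t from) {
      size_t pos = s.find(needle, from);
      return pos == std::string::npos ? -1 : static_cast<int>(pos);
    }

    int main() {
      assert(IndexOf("hello", "ll", 0) == 2);   // in [0, length - 1]
      assert(IndexOf("hello", "x", 0) == -1);   // absent
    }
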
Type* Typer::Visitor::TypeCheckBounds(Node* node) {
Type* index = Operand(node, 0);
Type* length = Operand(node, 1);
@@ -1731,6 +1808,11 @@ Type* Typer::Visitor::TypeCheckNumber(Node* node) {
return Type::Intersect(arg, Type::Number(), zone());
}
+Type* Typer::Visitor::TypeCheckReceiver(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::Receiver(), zone());
+}
+
Type* Typer::Visitor::TypeCheckSmi(Node* node) {
Type* arg = Operand(node, 0);
return Type::Intersect(arg, Type::SignedSmall(), zone());
@@ -1819,8 +1901,12 @@ Type* Typer::Visitor::TypeStoreTypedElement(Node* node) {
return nullptr;
}
-Type* Typer::Visitor::TypeObjectIsCallable(Node* node) {
- return TypeUnaryOp(node, ObjectIsCallable);
+Type* Typer::Visitor::TypeObjectIsDetectableCallable(Node* node) {
+ return TypeUnaryOp(node, ObjectIsDetectableCallable);
+}
+
+Type* Typer::Visitor::TypeObjectIsNonCallable(Node* node) {
+ return TypeUnaryOp(node, ObjectIsNonCallable);
}
Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 7f6f90a517..09b0b4d81b 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -50,9 +50,9 @@ class V8_EXPORT_PRIVATE Typer {
TypeCache const& cache_;
OperationTyper operation_typer_;
+ Type* singleton_empty_string_;
Type* singleton_false_;
Type* singleton_true_;
- Type* singleton_the_hole_;
Type* falsish_;
Type* truish_;
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index a2af190d9d..f28a56a43b 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -152,6 +152,8 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case ONE_BYTE_STRING_TYPE:
case CONS_STRING_TYPE:
case CONS_ONE_BYTE_STRING_TYPE:
+ case THIN_STRING_TYPE:
+ case THIN_ONE_BYTE_STRING_TYPE:
case SLICED_STRING_TYPE:
case SLICED_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
@@ -188,8 +190,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
}
case HEAP_NUMBER_TYPE:
return kNumber;
- case SIMD128_VALUE_TYPE:
- return kSimd;
case JS_OBJECT_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
@@ -225,6 +225,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
@@ -315,7 +316,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case ALLOCATION_MEMENTO_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
- case BOX_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
case PROMISE_REACTION_JOB_INFO_TYPE:
case DEBUG_INFO_TYPE:
@@ -462,7 +462,7 @@ HeapConstantType::HeapConstantType(BitsetType::bitset bitset,
i::Handle<i::HeapObject> object)
: TypeBase(kHeapConstant), bitset_(bitset), object_(object) {
DCHECK(!object->IsHeapNumber());
- DCHECK(!object->IsString());
+ DCHECK_IMPLIES(object->IsString(), object->IsInternalizedString());
}
// -----------------------------------------------------------------------------
@@ -838,17 +838,8 @@ Type* Type::NewConstant(i::Handle<i::Object> value, Zone* zone) {
return Range(v, v, zone);
} else if (value->IsHeapNumber()) {
return NewConstant(value->Number(), zone);
- } else if (value->IsString()) {
- bitset b = BitsetType::Lub(*value);
- DCHECK(b == BitsetType::kInternalizedString ||
- b == BitsetType::kOtherString);
- if (b == BitsetType::kInternalizedString) {
- return Type::InternalizedString();
- } else if (b == BitsetType::kOtherString) {
- return Type::OtherString();
- } else {
- UNREACHABLE();
- }
+ } else if (value->IsString() && !value->IsInternalizedString()) {
+ return Type::OtherString();
}
return HeapConstant(i::Handle<i::HeapObject>::cast(value), zone);
}
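
With the HeapConstantType DCHECK relaxed above, internalized strings may now flow into singleton HeapConstant types while other strings keep the coarse bitset. A sketch of the resulting rule (mirrors the fall-through above, not new API):

    Type* TypeForString(i::Handle<i::Object> value, Zone* zone) {
      if (value->IsString() && !value->IsInternalizedString()) {
        return Type::OtherString();
      }
      // Internalized strings reach the singleton HeapConstant case.
      return Type::HeapConstant(i::Handle<i::HeapObject>::cast(value), zone);
    }
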
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index b04f4e3c98..9e55a0bc88 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -116,69 +116,75 @@ namespace compiler {
V(Symbol, 1u << 12) \
V(InternalizedString, 1u << 13) \
V(OtherString, 1u << 14) \
- V(Simd, 1u << 15) \
- V(OtherCallable, 1u << 16) \
- V(OtherObject, 1u << 17) \
- V(OtherUndetectable, 1u << 18) \
- V(CallableProxy, 1u << 19) \
- V(OtherProxy, 1u << 20) \
- V(Function, 1u << 21) \
- V(BoundFunction, 1u << 22) \
- V(Hole, 1u << 23) \
- V(OtherInternal, 1u << 24) \
- V(ExternalPointer, 1u << 25) \
+ V(OtherCallable, 1u << 15) \
+ V(OtherObject, 1u << 16) \
+ V(OtherUndetectable, 1u << 17) \
+ V(CallableProxy, 1u << 18) \
+ V(OtherProxy, 1u << 19) \
+ V(Function, 1u << 20) \
+ V(BoundFunction, 1u << 21) \
+ V(Hole, 1u << 22) \
+ V(OtherInternal, 1u << 23) \
+ V(ExternalPointer, 1u << 24) \
\
- V(Signed31, kUnsigned30 | kNegative31) \
- V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
- V(Signed32OrMinusZero, kSigned32 | kMinusZero) \
- V(Signed32OrMinusZeroOrNaN, kSigned32 | kMinusZero | kNaN) \
- V(Negative32, kNegative31 | kOtherSigned32) \
- V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
- V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | \
- kOtherUnsigned32) \
- V(Unsigned32OrMinusZero, kUnsigned32 | kMinusZero) \
- V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
- V(Integral32, kSigned32 | kUnsigned32) \
- V(Integral32OrMinusZeroOrNaN, kIntegral32 | kMinusZero | kNaN) \
- V(PlainNumber, kIntegral32 | kOtherNumber) \
- V(OrderedNumber, kPlainNumber | kMinusZero) \
- V(MinusZeroOrNaN, kMinusZero | kNaN) \
- V(Number, kOrderedNumber | kNaN) \
- V(String, kInternalizedString | kOtherString) \
- V(UniqueName, kSymbol | kInternalizedString) \
- V(Name, kSymbol | kString) \
- V(BooleanOrNumber, kBoolean | kNumber) \
- V(BooleanOrNullOrNumber, kBooleanOrNumber | kNull) \
- V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
- V(NullOrNumber, kNull | kNumber) \
- V(NullOrUndefined, kNull | kUndefined) \
- V(Undetectable, kNullOrUndefined | kOtherUndetectable) \
- V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | kHole) \
- V(NumberOrSimdOrString, kNumber | kSimd | kString) \
- V(NumberOrString, kNumber | kString) \
- V(NumberOrUndefined, kNumber | kUndefined) \
- V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
- V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
- V(Proxy, kCallableProxy | kOtherProxy) \
- V(Callable, kFunction | kBoundFunction | kOtherCallable | \
- kCallableProxy | kOtherUndetectable) \
- V(DetectableObject, kFunction | kBoundFunction | kOtherCallable | \
- kOtherObject) \
- V(DetectableReceiver, kDetectableObject | kProxy) \
- V(DetectableReceiverOrNull, kDetectableReceiver | kNull) \
- V(Object, kDetectableObject | kOtherUndetectable) \
- V(Receiver, kObject | kProxy) \
- V(ReceiverOrUndefined, kReceiver | kUndefined) \
- V(ReceiverOrNullOrUndefined, kReceiver | kNull | kUndefined) \
- V(StringOrReceiver, kString | kReceiver) \
- V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
- kReceiver) \
- V(NonStringUniqueOrHole, kBoolean | kHole | kNull | kReceiver | \
- kSymbol | kUndefined) \
- V(Internal, kHole | kExternalPointer | kOtherInternal) \
- V(NonInternal, kPrimitive | kReceiver) \
- V(NonNumber, kUnique | kString | kInternal) \
- V(Any, 0xfffffffeu)
+ V(Signed31, kUnsigned30 | kNegative31) \
+ V(Signed32, kSigned31 | kOtherUnsigned31 | \
+ kOtherSigned32) \
+ V(Signed32OrMinusZero, kSigned32 | kMinusZero) \
+ V(Signed32OrMinusZeroOrNaN, kSigned32 | kMinusZero | kNaN) \
+ V(Negative32, kNegative31 | kOtherSigned32) \
+ V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
+ V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | \
+ kOtherUnsigned32) \
+ V(Unsigned32OrMinusZero, kUnsigned32 | kMinusZero) \
+ V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
+ V(Integral32, kSigned32 | kUnsigned32) \
+ V(Integral32OrMinusZeroOrNaN, kIntegral32 | kMinusZero | kNaN) \
+ V(PlainNumber, kIntegral32 | kOtherNumber) \
+ V(OrderedNumber, kPlainNumber | kMinusZero) \
+ V(MinusZeroOrNaN, kMinusZero | kNaN) \
+ V(Number, kOrderedNumber | kNaN) \
+ V(String, kInternalizedString | kOtherString) \
+ V(UniqueName, kSymbol | kInternalizedString) \
+ V(Name, kSymbol | kString) \
+ V(InternalizedStringOrNull, kInternalizedString | kNull) \
+ V(BooleanOrNumber, kBoolean | kNumber) \
+ V(BooleanOrNullOrNumber, kBooleanOrNumber | kNull) \
+ V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
+ V(Oddball, kBooleanOrNullOrUndefined | kHole) \
+ V(NullOrNumber, kNull | kNumber) \
+ V(NullOrUndefined, kNull | kUndefined) \
+ V(Undetectable, kNullOrUndefined | kOtherUndetectable) \
+ V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | \
+ kHole) \
+ V(NumberOrString, kNumber | kString) \
+ V(NumberOrUndefined, kNumber | kUndefined) \
+ V(PlainPrimitive, kNumberOrString | kBoolean | \
+ kNullOrUndefined) \
+ V(Primitive, kSymbol | kPlainPrimitive) \
+ V(OtherUndetectableOrUndefined, kOtherUndetectable | kUndefined) \
+ V(Proxy, kCallableProxy | kOtherProxy) \
+ V(DetectableCallable, kFunction | kBoundFunction | \
+ kOtherCallable | kCallableProxy) \
+ V(Callable, kDetectableCallable | kOtherUndetectable) \
+ V(NonCallable, kOtherObject | kOtherProxy) \
+ V(NonCallableOrNull, kNonCallable | kNull) \
+ V(DetectableObject, kFunction | kBoundFunction | \
+ kOtherCallable | kOtherObject) \
+ V(DetectableReceiver, kDetectableObject | kProxy) \
+ V(DetectableReceiverOrNull, kDetectableReceiver | kNull) \
+ V(Object, kDetectableObject | kOtherUndetectable) \
+ V(Receiver, kObject | kProxy) \
+ V(ReceiverOrUndefined, kReceiver | kUndefined) \
+ V(ReceiverOrNullOrUndefined, kReceiver | kNull | kUndefined) \
+ V(SymbolOrReceiver, kSymbol | kReceiver) \
+ V(StringOrReceiver, kString | kReceiver) \
+ V(Unique, kBoolean | kUniqueName | kNull | \
+ kUndefined | kReceiver) \
+ V(Internal, kHole | kExternalPointer | kOtherInternal) \
+ V(NonInternal, kPrimitive | kReceiver) \
+ V(NonNumber, kUnique | kString | kInternal) \
+ V(Any, 0xfffffffeu)
// clang-format on
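
Since every proper type in this macro is a bitset, subtyping and disjointness are plain bit operations, which is what makes renumbering the bits after removing kSimd safe. A minimal sketch of the lattice encoding:

    // Standalone sketch (plain C++, not V8 source):
    #include <cstdint>

    constexpr bool IsSubtype(uint32_t sub, uint32_t super) {
      return (sub & ~super) == 0;   // all of sub's bits are in super
    }
    constexpr bool IsDisjoint(uint32_t a, uint32_t b) {
      return (a & b) == 0;          // e.g. DetectableCallable vs NonCallable
    }
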
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index e11fc98320..7f63ceb803 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -207,6 +207,8 @@ void Verifier::Visitor::Check(Node* node) {
}
CHECK_EQ(1, count_true);
CHECK_EQ(1, count_false);
+ // The condition must be a Boolean.
+ CheckValueInputIs(node, 0, Type::Boolean());
// Type is empty.
CheckNotTyped(node);
break;
@@ -408,6 +410,10 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_EQ(0, effect_count);
CHECK_EQ(0, control_count);
CHECK_EQ(3, value_count);
+ // The condition must be a Boolean.
+ CheckValueInputIs(node, 0, Type::Boolean());
+ // Type can be anything.
+ CheckTypeIs(node, Type::Any());
break;
}
case IrOpcode::kPhi: {
@@ -490,6 +496,7 @@ void Verifier::Visitor::Check(Node* node) {
}
case IrOpcode::kStateValues:
case IrOpcode::kTypedStateValues:
+ case IrOpcode::kArgumentsObjectState:
case IrOpcode::kObjectState:
case IrOpcode::kTypedObjectState:
// TODO(jarin): what are the constraints on these?
@@ -625,6 +632,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckNotTyped(node);
CHECK(StoreGlobalParametersOf(node->op()).feedback().IsValid());
break;
+ case IrOpcode::kJSStoreNamedOwn:
+ // Type is empty.
+ CheckNotTyped(node);
+ CHECK(StoreNamedOwnParametersOf(node->op()).feedback().IsValid());
+ break;
case IrOpcode::kJSStoreDataPropertyInLiteral:
// Type is empty.
CheckNotTyped(node);
@@ -636,9 +648,13 @@ void Verifier::Visitor::Check(Node* node) {
// Type is Boolean.
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kJSClassOf:
+ // Type is InternalizedString \/ Null.
+ CheckTypeIs(node, Type::InternalizedStringOrNull());
+ break;
case IrOpcode::kJSTypeOf:
- // Type is String.
- CheckTypeIs(node, Type::String());
+ // Type is InternalizedString.
+ CheckTypeIs(node, Type::InternalizedString());
break;
case IrOpcode::kJSGetSuperConstructor:
// We don't check the input for Type::Function because
@@ -670,13 +686,15 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
- case IrOpcode::kJSCallConstruct:
- case IrOpcode::kJSCallConstructWithSpread:
+ case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSConvertReceiver:
// Type is Receiver.
CheckTypeIs(node, Type::Receiver());
break;
- case IrOpcode::kJSCallFunction:
+ case IrOpcode::kJSCallForwardVarargs:
+ case IrOpcode::kJSCall:
+ case IrOpcode::kJSCallWithSpread:
case IrOpcode::kJSCallRuntime:
// Type can be anything.
CheckTypeIs(node, Type::Any());
@@ -716,6 +734,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSStackCheck:
+ case IrOpcode::kJSDebugger:
// Type is empty.
CheckNotTyped(node);
break;
@@ -919,13 +938,22 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::String());
break;
- case IrOpcode::kReferenceEqual: {
+ case IrOpcode::kStringIndexOf:
+ // (String, String, SignedSmall) -> SignedSmall
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::String());
+ CheckValueInputIs(node, 2, Type::SignedSmall());
+ CheckTypeIs(node, Type::SignedSmall());
+ break;
+
+ case IrOpcode::kReferenceEqual:
// (Unique, Any) -> Boolean and
// (Any, Unique) -> Boolean
CheckTypeIs(node, Type::Boolean());
break;
- }
- case IrOpcode::kObjectIsCallable:
+
+ case IrOpcode::kObjectIsDetectableCallable:
+ case IrOpcode::kObjectIsNonCallable:
case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsSmi:
@@ -995,6 +1023,8 @@ void Verifier::Visitor::Check(Node* node) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kChangeTaggedToTaggedSigned:
+ break;
case IrOpcode::kTruncateTaggedToFloat64: {
// NumberOrUndefined /\ Tagged -> Number /\ UntaggedFloat64
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1101,6 +1131,10 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Number());
break;
+ case IrOpcode::kCheckReceiver:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::Receiver());
+ break;
case IrOpcode::kCheckSmi:
CheckValueInputIs(node, 0, Type::Any());
break;
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index f54ddbf492..168178e49e 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -6,11 +6,12 @@
#include <memory>
-#include "src/isolate-inl.h"
-
+#include "src/assembler-inl.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
-
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -27,12 +28,9 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/simd-scalar-lowering.h"
#include "src/compiler/zone-stats.h"
-
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/factory.h"
+#include "src/isolate-inl.h"
#include "src/log-inl.h"
-
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -67,10 +65,12 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
}
-Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
- Handle<Context> context, Node** parameters,
- int parameter_count, Node** effect_ptr,
- Node* control) {
+// Only call this function for code which is not reused across instantiations,
+// as we do not patch the embedded context.
+Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
+ Node* context, Node** parameters,
+ int parameter_count, Node** effect_ptr,
+ Node* control) {
const Runtime::Function* fun = Runtime::FunctionForId(f);
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
@@ -78,7 +78,7 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
// CEntryStubConstant nodes have to be created and cached in the main
// thread. At the moment this is only done for CEntryStubConstant(1).
DCHECK_EQ(1, fun->result_size);
- // At the moment we only allow 2 parameters. If more parameters are needed,
+ // At the moment we only allow 3 parameters. If more parameters are needed,
// increase this constant accordingly.
static const int kMaxParams = 3;
DCHECK_GE(kMaxParams, parameter_count);
@@ -91,7 +91,7 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
inputs[count++] = jsgraph->ExternalConstant(
ExternalReference(f, jsgraph->isolate())); // ref
inputs[count++] = jsgraph->Int32Constant(fun->nargs); // arity
- inputs[count++] = jsgraph->HeapConstant(context); // context
+ inputs[count++] = context; // context
inputs[count++] = *effect_ptr;
inputs[count++] = control;
@@ -101,6 +101,14 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
return node;
}
+Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
+ Node** parameters, int parameter_count,
+ Node** effect_ptr, Node* control) {
+ return BuildCallToRuntimeWithContext(f, jsgraph, jsgraph->NoContextConstant(),
+ parameters, parameter_count, effect_ptr,
+ control);
+}
+
} // namespace
// TODO(eholk): Support trap handlers on other platforms.
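
After this refactoring there are two entry points: the context-taking variant for wrapper code that is regenerated per instantiation, and the no-context variant for shared code. A usage sketch mirroring the Throw call site changed later in this patch (jsgraph, parameters, effect_ptr and control as in the surrounding builder code):

    // Sketch: shared wasm code must not embed a concrete context.
    Node* call = BuildCallToRuntime(Runtime::kWasmThrow, jsgraph,
                                    parameters, arraysize(parameters),
                                    effect_ptr, control);
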
@@ -170,30 +178,30 @@ class WasmTrapHelper : public ZoneObject {
return TrapIfEq64(reason, node, 0, position);
}
- Runtime::FunctionId GetFunctionIdForTrap(wasm::TrapReason reason) {
+ Builtins::Name GetBuiltinIdForTrap(wasm::TrapReason reason) {
if (builder_->module_ && !builder_->module_->instance->context.is_null()) {
switch (reason) {
#define TRAPREASON_TO_MESSAGE(name) \
case wasm::k##name: \
- return Runtime::kThrowWasm##name;
+ return Builtins::kThrowWasm##name;
FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
#undef TRAPREASON_TO_MESSAGE
default:
UNREACHABLE();
- return Runtime::kNumFunctions;
+ return Builtins::builtin_count;
}
} else {
// We use Builtins::builtin_count as a marker to tell the code generator
// to generate a call to a testing c-function instead of a trap builtin.
// This code should only be called from a cctest.
- return Runtime::kNumFunctions;
+ return Builtins::builtin_count;
}
}
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
- V8_TARGET_ARCH_S390X
+ V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_X87
#define WASM_TRAP_IF_SUPPORTED
#endif
@@ -202,7 +210,7 @@ class WasmTrapHelper : public ZoneObject {
wasm::WasmCodePosition position) {
#ifdef WASM_TRAP_IF_SUPPORTED
if (FLAG_wasm_trap_if) {
- int32_t trap_id = GetFunctionIdForTrap(reason);
+ int32_t trap_id = GetBuiltinIdForTrap(reason);
Node* node = graph()->NewNode(common()->TrapIf(trap_id), cond,
builder_->Effect(), builder_->Control());
*builder_->control_ = node;
@@ -218,7 +226,7 @@ class WasmTrapHelper : public ZoneObject {
wasm::WasmCodePosition position) {
#ifdef WASM_TRAP_IF_SUPPORTED
if (FLAG_wasm_trap_if) {
- int32_t trap_id = GetFunctionIdForTrap(reason);
+ int32_t trap_id = GetBuiltinIdForTrap(reason);
Node* node = graph()->NewNode(common()->TrapUnless(trap_id), cond,
builder_->Effect(), builder_->Control());
@@ -327,8 +335,7 @@ class WasmTrapHelper : public ZoneObject {
if (module && !module->instance->context.is_null()) {
Node* parameters[] = {trap_reason_smi, // message id
trap_position_smi}; // byte position
- BuildCallToRuntime(Runtime::kThrowWasmError, jsgraph(),
- module->instance->context, parameters,
+ BuildCallToRuntime(Runtime::kThrowWasmError, jsgraph(), parameters,
arraysize(parameters), effect_ptr, *control_ptr);
}
if (false) {
@@ -472,40 +479,44 @@ Node* WasmGraphBuilder::Int64Constant(int64_t value) {
void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
Node** effect, Node** control) {
if (FLAG_wasm_no_stack_checks) return;
- if (effect == nullptr) {
- effect = effect_;
- }
- if (control == nullptr) {
- control = control_;
- }
// We do not generate stack checks for cctests.
- if (module_ && !module_->instance->context.is_null()) {
- Node* limit = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::Pointer()),
- jsgraph()->ExternalConstant(
- ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
- jsgraph()->IntPtrConstant(0), *effect, *control);
- Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
+ if (!module_ || module_->instance->context.is_null()) return;
+ if (effect == nullptr) effect = effect_;
+ if (control == nullptr) control = control_;
- Node* check =
- graph()->NewNode(jsgraph()->machine()->UintLessThan(), limit, pointer);
+ Node* limit = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::Pointer()),
+ jsgraph()->ExternalConstant(
+ ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
+ jsgraph()->IntPtrConstant(0), *effect, *control);
+ Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
- Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
- stack_check.Chain(*control);
- Node* effect_true = *effect;
+ Node* check =
+ graph()->NewNode(jsgraph()->machine()->UintLessThan(), limit, pointer);
- // Generate a call to the runtime if there is a stack check failure.
- Node* call = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
- module_->instance->context, nullptr, 0,
- effect, stack_check.if_false);
- SetSourcePosition(call, position);
+ Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
+ stack_check.Chain(*control);
+ Node* effect_true = *effect;
- Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2),
- effect_true, call, stack_check.merge);
+ Handle<Code> code = jsgraph()->isolate()->builtins()->WasmStackGuard();
+ CallInterfaceDescriptor idesc =
+ WasmRuntimeCallDescriptor(jsgraph()->isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph()->isolate(), jsgraph()->zone(), idesc, 0,
+ CallDescriptor::kNoFlags, Operator::kNoProperties);
+ Node* stub_code = jsgraph()->HeapConstant(code);
- *control = stack_check.merge;
- *effect = ephi;
- }
+ Node* context = jsgraph()->NoContextConstant();
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
+ context, *effect, stack_check.if_false);
+
+ SetSourcePosition(call, position);
+
+ Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2), effect_true,
+ call, stack_check.merge);
+
+ *control = stack_check.merge;
+ *effect = ephi;
}
Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
@@ -1781,35 +1792,23 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
Diamond check_input_range(
graph(), jsgraph()->common(),
graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(), input,
- jsgraph()->Uint32Constant(wasm::kV8MaxWasmMemoryPages)),
+ jsgraph()->Uint32Constant(FLAG_wasm_max_mem_pages)),
BranchHint::kTrue);
check_input_range.Chain(*control_);
- Runtime::FunctionId function_id = Runtime::kWasmGrowMemory;
- const Runtime::Function* function = Runtime::FunctionForId(function_id);
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- jsgraph()->zone(), function_id, function->nargs, Operator::kNoThrow,
- CallDescriptor::kNoFlags);
- wasm::ModuleEnv* module = module_;
- input = BuildChangeUint32ToSmi(input);
- Node* inputs[] = {
- jsgraph()->CEntryStubConstant(function->result_size), input, // C entry
- jsgraph()->ExternalConstant(
- ExternalReference(function_id, jsgraph()->isolate())), // ref
- jsgraph()->Int32Constant(function->nargs), // arity
- jsgraph()->HeapConstant(module->instance->context), // context
- *effect_,
- check_input_range.if_true};
- Node* call = graph()->NewNode(jsgraph()->common()->Call(desc),
- static_cast<int>(arraysize(inputs)), inputs);
+ Node* parameters[] = {BuildChangeUint32ToSmi(input)};
+ Node* old_effect = *effect_;
+ Node* call = BuildCallToRuntime(Runtime::kWasmGrowMemory, jsgraph(),
+ parameters, arraysize(parameters), effect_,
+ check_input_range.if_true);
Node* result = BuildChangeSmiToInt32(call);
result = check_input_range.Phi(MachineRepresentation::kWord32, result,
jsgraph()->Int32Constant(-1));
- *effect_ = graph()->NewNode(jsgraph()->common()->EffectPhi(2), call, *effect_,
- check_input_range.merge);
+ *effect_ = graph()->NewNode(jsgraph()->common()->EffectPhi(2), call,
+ old_effect, check_input_range.merge);
*control_ = check_input_range.merge;
return result;
}
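
GrowMemory now guards the runtime call with the configurable page limit and merges -1 on the failure path. A runnable sketch of the resulting value semantics (max_pages stands in for FLAG_wasm_max_mem_pages above):

    // Standalone sketch (plain C++, not V8 source):
    #include <cassert>
    #include <cstdint>

    int32_t GrowMemoryOutcome(uint32_t pages, uint32_t max_pages,
                              int32_t runtime_result) {
      // Requests beyond the limit never reach the runtime; they yield -1.
      return pages <= max_pages ? runtime_result : -1;
    }

    int main() {
      assert(GrowMemoryOutcome(1u, 16384u, 5) == 5);
      assert(GrowMemoryOutcome(70000u, 16384u, 5) == -1);
    }
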
@@ -1832,8 +1831,7 @@ Node* WasmGraphBuilder::Throw(Node* input) {
graph()->NewNode(machine->Word32And(), input, Int32Constant(0xFFFFu)));
Node* parameters[] = {lower, upper}; // thrown value
- return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(),
- module_->instance->context, parameters,
+ return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(), parameters,
arraysize(parameters), effect_, *control_);
}
@@ -1843,8 +1841,7 @@ Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
Node* parameters[] = {input}; // caught value
Node* value =
BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue, jsgraph(),
- module_->instance->context, parameters,
- arraysize(parameters), effect_, *control_);
+ parameters, arraysize(parameters), effect_, *control_);
Node* is_smi;
Node* is_heap;
@@ -2765,12 +2762,18 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
*control_ = start;
*effect_ = start;
+ // Create the context parameter
+ Node* context = graph()->NewNode(
+ jsgraph()->common()->Parameter(
+ Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
+ graph()->start());
+
if (!HasJSCompatibleSignature(sig_)) {
- // Throw a TypeError. The native context is good enough here because we
- // only throw a TypeError.
- BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
- jsgraph()->isolate()->native_context(), nullptr, 0,
- effect_, *control_);
+ // Throw a TypeError. Use the context of the calling JavaScript function
+ // (passed as a parameter), such that the generated code is context
+ // independent.
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
+ context, nullptr, 0, effect_, *control_);
// Add a dummy call to the wasm function so that the generated wrapper
// contains a reference to the wrapped wasm function. Without this reference
@@ -2789,12 +2792,6 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
return;
}
- // Create the context parameter
- Node* context = graph()->NewNode(
- jsgraph()->common()->Parameter(
- Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
- graph()->start());
-
int pos = 0;
args[pos++] = HeapConstant(wasm_code);
@@ -2845,11 +2842,13 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
*control_ = start;
if (!HasJSCompatibleSignature(sig_)) {
- // Throw a TypeError. The native context is good enough here because we
- // only throw a TypeError.
- Return(BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
- jsgraph()->isolate()->native_context(), nullptr,
- 0, effect_, *control_));
+ // Throw a TypeError. Embedding the context is ok here, since this code is
+ // regenerated at instantiation time.
+ Node* context =
+ jsgraph()->HeapConstant(jsgraph()->isolate()->native_context());
+ Return(BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
+ jsgraph(), context, nullptr, 0,
+ effect_, *control_));
return;
}
@@ -2947,7 +2946,8 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
// Compute size for the argument buffer.
int args_size_bytes = 0;
for (int i = 0; i < wasm_count; i++) {
- args_size_bytes += 1 << ElementSizeLog2Of(sig->GetParam(i));
+ args_size_bytes +=
+ RoundUpToMultipleOfPowOf2(1 << ElementSizeLog2Of(sig->GetParam(i)), 8);
}
// The return value is also passed via this buffer:
@@ -2958,8 +2958,10 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
sig->return_count() == 0 ? 0 : 1 << ElementSizeLog2Of(sig->GetReturn(0));
// Get a stack slot for the arguments.
- Node* arg_buffer = graph()->NewNode(jsgraph()->machine()->StackSlot(
- std::max(args_size_bytes, return_size_bytes)));
+ Node* arg_buffer = args_size_bytes == 0 && return_size_bytes == 0
+ ? jsgraph()->IntPtrConstant(0)
+ : graph()->NewNode(jsgraph()->machine()->StackSlot(
+ std::max(args_size_bytes, return_size_bytes)));
// Now store all our arguments to the buffer.
int param_index = 0;
@@ -2968,25 +2970,33 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
Node* param = Param(param_index++);
bool is_i64_as_two_params =
jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kWasmI64;
- MachineRepresentation param_rep =
- is_i64_as_two_params ? wasm::kWasmI32 : sig->GetParam(i);
- StoreRepresentation store_rep(param_rep, WriteBarrierKind::kNoWriteBarrier);
- *effect_ =
- graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
- Int32Constant(offset), param, *effect_, *control_);
- offset += 1 << ElementSizeLog2Of(param_rep);
- // TODO(clemensh): Respect endianess here. Might need to swap upper and
- // lower word.
+
if (is_i64_as_two_params) {
- // Also store the upper half.
- param = Param(param_index++);
StoreRepresentation store_rep(wasm::kWasmI32,
WriteBarrierKind::kNoWriteBarrier);
*effect_ =
graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+ Int32Constant(offset + kInt64LowerHalfMemoryOffset),
+ param, *effect_, *control_);
+
+ param = Param(param_index++);
+ *effect_ =
+ graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+ Int32Constant(offset + kInt64UpperHalfMemoryOffset),
+ param, *effect_, *control_);
+ offset += 8;
+
+ } else {
+ MachineRepresentation param_rep = sig->GetParam(i);
+ StoreRepresentation store_rep(param_rep,
+ WriteBarrierKind::kNoWriteBarrier);
+ *effect_ =
+ graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
Int32Constant(offset), param, *effect_, *control_);
- offset += 1 << ElementSizeLog2Of(wasm::kWasmI32);
+ offset += RoundUpToMultipleOfPowOf2(1 << ElementSizeLog2Of(param_rep), 8);
}
+
+ DCHECK(IsAligned(offset, 8));
}
DCHECK_EQ(param_count, param_index);
DCHECK_EQ(args_size_bytes, offset);
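
Each parameter now occupies an 8-byte-aligned slot in the argument buffer, with i64 values on 32-bit targets stored as two word halves at endian-aware offsets. A runnable sketch of the slot-size computation:

    // Standalone sketch (plain C++, not V8 source):
    #include <cassert>

    int SlotSize(int byte_size) {
      return (byte_size + 7) & ~7;  // round up to a multiple of 8
    }

    int main() {
      assert(SlotSize(4) == 8);   // i32 / f32 still take a full slot
      assert(SlotSize(8) == 8);   // i64 / f64
    }
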
@@ -2999,8 +3009,7 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
jsgraph()->SmiConstant(function_index), // function index
arg_buffer, // argument buffer
};
- BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(),
- jsgraph()->isolate()->native_context(), parameters,
+ BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(), parameters,
arraysize(parameters), effect_, *control_);
// Read back the return value.
@@ -3353,10 +3362,10 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprF32x4Splat:
return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
inputs[0], inputs[0], inputs[0], inputs[0]);
- case wasm::kExprF32x4FromInt32x4:
+ case wasm::kExprF32x4SConvertI32x4:
return graph()->NewNode(jsgraph()->machine()->Float32x4FromInt32x4(),
inputs[0]);
- case wasm::kExprF32x4FromUint32x4:
+ case wasm::kExprF32x4UConvertI32x4:
return graph()->NewNode(jsgraph()->machine()->Float32x4FromUint32x4(),
inputs[0]);
case wasm::kExprF32x4Abs:
@@ -3378,27 +3387,231 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI32x4Splat:
return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
inputs[0], inputs[0], inputs[0]);
- case wasm::kExprI32x4FromFloat32x4:
+ case wasm::kExprI32x4SConvertF32x4:
return graph()->NewNode(jsgraph()->machine()->Int32x4FromFloat32x4(),
inputs[0]);
- case wasm::kExprUi32x4FromFloat32x4:
+ case wasm::kExprI32x4UConvertF32x4:
return graph()->NewNode(jsgraph()->machine()->Uint32x4FromFloat32x4(),
inputs[0]);
+ case wasm::kExprI32x4Neg:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4Neg(), inputs[0]);
case wasm::kExprI32x4Add:
return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
inputs[1]);
case wasm::kExprI32x4Sub:
return graph()->NewNode(jsgraph()->machine()->Int32x4Sub(), inputs[0],
inputs[1]);
+ case wasm::kExprI32x4Mul:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4Mul(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI32x4MinS:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4Min(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI32x4MaxS:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4Max(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4Eq:
return graph()->NewNode(jsgraph()->machine()->Int32x4Equal(), inputs[0],
inputs[1]);
case wasm::kExprI32x4Ne:
return graph()->NewNode(jsgraph()->machine()->Int32x4NotEqual(),
inputs[0], inputs[1]);
+ case wasm::kExprI32x4LtS:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThan(),
+ inputs[1], inputs[0]);
+ case wasm::kExprI32x4LeS:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThanOrEqual(),
+ inputs[1], inputs[0]);
+ case wasm::kExprI32x4GtS:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThan(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4GeS:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThanOrEqual(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4MinU:
+ return graph()->NewNode(jsgraph()->machine()->Uint32x4Min(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI32x4MaxU:
+ return graph()->NewNode(jsgraph()->machine()->Uint32x4Max(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI32x4LtU:
+ return graph()->NewNode(jsgraph()->machine()->Uint32x4GreaterThan(),
+ inputs[1], inputs[0]);
+ case wasm::kExprI32x4LeU:
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint32x4GreaterThanOrEqual(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI32x4GtU:
+ return graph()->NewNode(jsgraph()->machine()->Uint32x4GreaterThan(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4GeU:
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint32x4GreaterThanOrEqual(), inputs[0],
+ inputs[1]);
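
A note on the comparison lowerings above: the machine layer only provides the GreaterThan/GreaterThanOrEqual direction, so the LtS/LeS cases (and their unsigned variants) swap their operands instead of introducing new operators. A runnable sketch of the identity relied on:

    // Standalone sketch (plain C++, not V8 source): a < b  <=>  b > a.
    #include <cassert>

    bool LessThanViaGreaterThan(int a, int b) { return b > a; }

    int main() {
      assert(LessThanViaGreaterThan(1, 2) == (1 < 2));
      assert(LessThanViaGreaterThan(2, 1) == (2 < 1));
    }
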
+ case wasm::kExprI16x8Splat:
+ return graph()->NewNode(jsgraph()->machine()->CreateInt16x8(), inputs[0],
+ inputs[0], inputs[0], inputs[0], inputs[0],
+ inputs[0], inputs[0], inputs[0]);
+ case wasm::kExprI16x8Neg:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8Neg(), inputs[0]);
+ case wasm::kExprI16x8Add:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8Add(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI16x8AddSaturateS:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8AddSaturate(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8Sub:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8Sub(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI16x8SubSaturateS:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8SubSaturate(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8Mul:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8Mul(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI16x8MinS:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8Min(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI16x8MaxS:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8Max(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI16x8Eq:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8Equal(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI16x8Ne:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8NotEqual(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8LtS:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThan(),
+ inputs[1], inputs[0]);
+ case wasm::kExprI16x8LeS:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThanOrEqual(),
+ inputs[1], inputs[0]);
+ case wasm::kExprI16x8GtS:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThan(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8GeS:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThanOrEqual(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8AddSaturateU:
+ return graph()->NewNode(jsgraph()->machine()->Uint16x8AddSaturate(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8SubSaturateU:
+ return graph()->NewNode(jsgraph()->machine()->Uint16x8SubSaturate(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8MinU:
+ return graph()->NewNode(jsgraph()->machine()->Uint16x8Min(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI16x8MaxU:
+ return graph()->NewNode(jsgraph()->machine()->Uint16x8Max(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI16x8LtU:
+ return graph()->NewNode(jsgraph()->machine()->Uint16x8GreaterThan(),
+ inputs[1], inputs[0]);
+ case wasm::kExprI16x8LeU:
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint16x8GreaterThanOrEqual(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI16x8GtU:
+ return graph()->NewNode(jsgraph()->machine()->Uint16x8GreaterThan(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8GeU:
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint16x8GreaterThanOrEqual(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16Splat:
+ return graph()->NewNode(jsgraph()->machine()->CreateInt8x16(), inputs[0],
+ inputs[0], inputs[0], inputs[0], inputs[0],
+ inputs[0], inputs[0], inputs[0], inputs[0],
+ inputs[0], inputs[0], inputs[0], inputs[0],
+ inputs[0], inputs[0], inputs[0]);
+ case wasm::kExprI8x16Neg:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16Neg(), inputs[0]);
+ case wasm::kExprI8x16Add:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16Add(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16AddSaturateS:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16AddSaturate(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI8x16Sub:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16Sub(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16SubSaturateS:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16SubSaturate(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI8x16Mul:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16Mul(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16MinS:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16Min(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16MaxS:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16Max(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16Eq:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16Equal(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16Ne:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16NotEqual(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI8x16LtS:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThan(),
+ inputs[1], inputs[0]);
+ case wasm::kExprI8x16LeS:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThanOrEqual(),
+ inputs[1], inputs[0]);
+ case wasm::kExprI8x16GtS:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThan(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI8x16GeS:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThanOrEqual(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI8x16AddSaturateU:
+ return graph()->NewNode(jsgraph()->machine()->Uint8x16AddSaturate(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI8x16SubSaturateU:
+ return graph()->NewNode(jsgraph()->machine()->Uint8x16SubSaturate(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI8x16MinU:
+ return graph()->NewNode(jsgraph()->machine()->Uint8x16Min(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16MaxU:
+ return graph()->NewNode(jsgraph()->machine()->Uint8x16Max(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16LtU:
+ return graph()->NewNode(jsgraph()->machine()->Uint8x16GreaterThan(),
+ inputs[1], inputs[0]);
+ case wasm::kExprI8x16LeU:
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint8x16GreaterThanOrEqual(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI8x16GtU:
+ return graph()->NewNode(jsgraph()->machine()->Uint8x16GreaterThan(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI8x16GeU:
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint8x16GreaterThanOrEqual(), inputs[0],
+ inputs[1]);
case wasm::kExprS32x4Select:
return graph()->NewNode(jsgraph()->machine()->Simd32x4Select(), inputs[0],
inputs[1], inputs[2]);
+ case wasm::kExprS16x8Select:
+ return graph()->NewNode(jsgraph()->machine()->Simd16x8Select(), inputs[0],
+ inputs[1], inputs[2]);
+ case wasm::kExprS8x16Select:
+ return graph()->NewNode(jsgraph()->machine()->Simd8x16Select(), inputs[0],
+ inputs[1], inputs[2]);
+ case wasm::kExprS128And:
+ return graph()->NewNode(jsgraph()->machine()->Simd128And(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS128Or:
+ return graph()->NewNode(jsgraph()->machine()->Simd128Or(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS128Xor:
+ return graph()->NewNode(jsgraph()->machine()->Simd128Xor(), inputs[0],
+ inputs[1]);
+ case wasm::kExprS128Not:
+ return graph()->NewNode(jsgraph()->machine()->Simd128Not(), inputs[0]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
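// Two conventions worth noting in the cases added above: the conversion
// opcodes were renamed to carry an explicit signedness prefix
// (F32x4SConvertI32x4 / F32x4UConvertI32x4 instead of the old "From"
// names), and the new less-than comparisons are lowered by reusing the
// greater-than machine operators with swapped inputs, since a < b holds
// lane-wise exactly when b > a. A minimal sketch of that pattern:
//
//   case wasm::kExprI32x4LtS:  // a < b  ==  b > a
//     return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThan(),
//                             inputs[1], inputs[0]);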
@@ -3408,23 +3621,89 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
const NodeVector& inputs) {
has_simd_ = true;
switch (opcode) {
+ case wasm::kExprF32x4ExtractLane:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprF32x4ReplaceLane:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4ReplaceLane(lane),
+ inputs[0], inputs[1]);
case wasm::kExprI32x4ExtractLane:
- return graph()->NewNode(jsgraph()->common()->Int32x4ExtractLane(lane),
+ return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(lane),
inputs[0]);
case wasm::kExprI32x4ReplaceLane:
- return graph()->NewNode(jsgraph()->common()->Int32x4ReplaceLane(lane),
+ return graph()->NewNode(jsgraph()->machine()->Int32x4ReplaceLane(lane),
inputs[0], inputs[1]);
- case wasm::kExprF32x4ExtractLane:
- return graph()->NewNode(jsgraph()->common()->Float32x4ExtractLane(lane),
+ case wasm::kExprI16x8ExtractLane:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8ExtractLane(lane),
inputs[0]);
- case wasm::kExprF32x4ReplaceLane:
- return graph()->NewNode(jsgraph()->common()->Float32x4ReplaceLane(lane),
+ case wasm::kExprI16x8ReplaceLane:
+ return graph()->NewNode(jsgraph()->machine()->Int16x8ReplaceLane(lane),
+ inputs[0], inputs[1]);
+ case wasm::kExprI8x16ExtractLane:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprI8x16ReplaceLane:
+ return graph()->NewNode(jsgraph()->machine()->Int8x16ReplaceLane(lane),
inputs[0], inputs[1]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
}
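// Lane indices are encoded as immediates on the machine operator itself
// rather than passed as value inputs, so extracting lane 2 of an i32x4
// node v would be built roughly as (illustration only, not patch code):
//
//   Node* lane2 = graph()->NewNode(
//       jsgraph()->machine()->Int32x4ExtractLane(2), v);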
+Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
+ const NodeVector& inputs) {
+ has_simd_ = true;
+ switch (opcode) {
+ case wasm::kExprI32x4Shl:
+ return graph()->NewNode(
+ jsgraph()->machine()->Int32x4ShiftLeftByScalar(shift), inputs[0]);
+ case wasm::kExprI32x4ShrS:
+ return graph()->NewNode(
+ jsgraph()->machine()->Int32x4ShiftRightByScalar(shift), inputs[0]);
+ case wasm::kExprI32x4ShrU:
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint32x4ShiftRightByScalar(shift), inputs[0]);
+ case wasm::kExprI16x8Shl:
+ return graph()->NewNode(
+ jsgraph()->machine()->Int16x8ShiftLeftByScalar(shift), inputs[0]);
+ case wasm::kExprI16x8ShrS:
+ return graph()->NewNode(
+ jsgraph()->machine()->Int16x8ShiftRightByScalar(shift), inputs[0]);
+ case wasm::kExprI16x8ShrU:
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint16x8ShiftRightByScalar(shift), inputs[0]);
+ case wasm::kExprI8x16Shl:
+ return graph()->NewNode(
+ jsgraph()->machine()->Int8x16ShiftLeftByScalar(shift), inputs[0]);
+ case wasm::kExprI8x16ShrS:
+ return graph()->NewNode(
+ jsgraph()->machine()->Int8x16ShiftRightByScalar(shift), inputs[0]);
+ case wasm::kExprI8x16ShrU:
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint8x16ShiftRightByScalar(shift), inputs[0]);
+ default:
+ return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ }
+}
+
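+// As with lanes, the shift amount is a compile-time immediate baked into
+// the operator; ShrS selects the arithmetic (sign-propagating) right shift
+// and ShrU the logical one. Sketch: an i32x4 arithmetic shift right by 1
+// would lower to
+//
+//   graph()->NewNode(jsgraph()->machine()->Int32x4ShiftRightByScalar(1), v);
+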
+Node* WasmGraphBuilder::SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
+ const NodeVector& inputs) {
+ has_simd_ = true;
+ switch (opcode) {
+ case wasm::kExprS32x4Swizzle:
+ return graph()->NewNode(jsgraph()->machine()->Simd32x4Swizzle(swizzle),
+ inputs[0]);
+ case wasm::kExprS16x8Swizzle:
+ return graph()->NewNode(jsgraph()->machine()->Simd16x8Swizzle(swizzle),
+ inputs[0]);
+ case wasm::kExprS8x16Swizzle:
+ return graph()->NewNode(jsgraph()->machine()->Simd8x16Swizzle(swizzle),
+ inputs[0]);
+ default:
+ return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ }
+}
+
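+// The swizzle selector travels the same way, as a uint32_t immediate on
+// the machine operator; exactly how the selector packs per-lane indices is
+// defined by the machine-operator builder, not by this dispatch. Sketch
+// (selector encoding assumed):
+//
+//   graph()->NewNode(jsgraph()->machine()->Simd32x4Swizzle(selector), v);
+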
static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Isolate* isolate, Handle<Code> code,
const char* message, uint32_t index,
@@ -3618,7 +3897,10 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(&zone);
+ MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
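+  // (This mirrors the MachineOperatorBuilder set up for WasmCompilationUnit
+  // further down: explicit pointer-width word representation plus the
+  // instruction selector's supported-operator flags and alignment
+  // requirements, rather than the defaults of the zone-only constructor.)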
JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
@@ -3688,9 +3970,9 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
MachineOperatorBuilder* machine = jsgraph_->machine();
SourcePositionTable* source_position_table =
new (jsgraph_->zone()) SourcePositionTable(graph);
- WasmGraphBuilder builder(module_env_, jsgraph_->zone(), jsgraph_,
+ WasmGraphBuilder builder(&module_env_->module_env, jsgraph_->zone(), jsgraph_,
function_->sig, source_position_table);
- const byte* module_start = module_env_->module_bytes.start();
+ const byte* module_start = module_env_->wire_bytes.start();
wasm::FunctionBody body = {function_->sig, module_start,
module_start + function_->code_start_offset,
module_start + function_->code_end_offset};
@@ -3719,12 +4001,13 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
OFStream os(stdout);
- PrintRawWasmCode(isolate_->allocator(), body, module_env_->module);
+ PrintRawWasmCode(isolate_->allocator(), body,
+ module_env_->module_env.module);
}
if (index >= FLAG_trace_wasm_text_start && index < FLAG_trace_wasm_text_end) {
OFStream os(stdout);
- PrintWasmText(module_env_->module, *module_env_, function_->func_index, os,
- nullptr);
+ PrintWasmText(module_env_->module_env.module, module_env_->wire_bytes,
+ function_->func_index, os, nullptr);
}
if (FLAG_trace_wasm_decode_time) {
*decode_ms = decode_timer.Elapsed().InMillisecondsF();
@@ -3732,6 +4015,13 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
return source_position_table;
}
+char* WasmCompilationUnit::GetTaggedFunctionName(
+ const wasm::WasmFunction* function) {
+ snprintf(function_name_, sizeof(function_name_), "wasm#%d",
+ function->func_index);
+ return function_name_;
+}
+
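+// Buffer-size note (not part of the patch): function_name_ is 16 bytes,
+// which is exactly enough: "wasm#" is 5 chars, a uint32 func_index is at
+// most 10 decimal digits (4294967295), and the NUL terminator brings the
+// total to 16, so the snprintf above can never truncate.
+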
WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
Isolate* isolate,
wasm::ModuleBytesEnv* module_env,
@@ -3740,7 +4030,7 @@ WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
: thrower_(thrower),
isolate_(isolate),
module_env_(module_env),
- function_(&module_env->module->functions[index]),
+ function_(&module_env->module_env.module->functions[index]),
graph_zone_(new Zone(isolate->allocator(), ZONE_NAME)),
jsgraph_(new (graph_zone()) JSGraph(
isolate, new (graph_zone()) Graph(graph_zone()),
@@ -3750,8 +4040,9 @@ WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()))),
compilation_zone_(isolate->allocator(), ZONE_NAME),
- info_(function->name_length != 0 ? module_env->GetNameOrNull(function)
- : ArrayVector("wasm"),
+ info_(function->name_length != 0
+ ? module_env->wire_bytes.GetNameOrNull(function)
+ : CStrVector(GetTaggedFunctionName(function)),
isolate, &compilation_zone_,
Code::ComputeFlags(Code::WASM_FUNCTION)),
job_(),
@@ -3769,7 +4060,9 @@ void WasmCompilationUnit::ExecuteCompilation() {
if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
os << "Compiling WASM function "
- << wasm::WasmFunctionName(function_, module_env_) << std::endl;
+ << wasm::WasmFunctionName(
+ function_, module_env_->wire_bytes.GetNameOrNull(function_))
+ << std::endl;
os << std::endl;
}
@@ -3794,12 +4087,12 @@ void WasmCompilationUnit::ExecuteCompilation() {
CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
&compilation_zone_, function_->sig);
if (jsgraph_->machine()->Is32()) {
- descriptor =
- module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
+ descriptor = module_env_->module_env.GetI32WasmCallDescriptor(
+ &compilation_zone_, descriptor);
}
job_.reset(Pipeline::NewWasmCompilationJob(
&info_, jsgraph_, descriptor, source_positions, &protected_instructions_,
- module_env_->module->origin != wasm::kWasmOrigin));
+ module_env_->module_env.module->origin != wasm::kWasmOrigin));
ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
// TODO(ahaas): The counters are not thread-safe at the moment.
@@ -3810,11 +4103,10 @@ void WasmCompilationUnit::ExecuteCompilation() {
if (FLAG_trace_wasm_decode_time) {
double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
PrintF(
- "wasm-compilation phase 1 ok: %d bytes, %0.3f ms decode, %zu nodes, "
+ "wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
"%0.3f ms pipeline\n",
- static_cast<int>(function_->code_end_offset -
- function_->code_start_offset),
- decode_ms, node_count, pipeline_ms);
+ function_->code_end_offset - function_->code_start_offset, decode_ms,
+ node_count, pipeline_ms);
}
}
@@ -3823,7 +4115,7 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
if (graph_construction_result_.failed()) {
    // Add the function as another context for the exception.
ScopedVector<char> buffer(128);
- wasm::WasmName name = module_env_->GetName(function_);
+ wasm::WasmName name = module_env_->wire_bytes.GetName(function_);
SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
function_->func_index, name.length(), name.start());
thrower_->CompileFailed(buffer.start(), graph_construction_result_);
@@ -3831,13 +4123,13 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
return Handle<Code>::null();
}
+ base::ElapsedTimer codegen_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ codegen_timer.Start();
+ }
if (job_->FinalizeJob() != CompilationJob::SUCCEEDED) {
return Handle<Code>::null();
}
- base::ElapsedTimer compile_timer;
- if (FLAG_trace_wasm_decode_time) {
- compile_timer.Start();
- }
Handle<Code> code = info_.code();
DCHECK(!code.is_null());
@@ -3846,38 +4138,19 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
"WASM_function", function_->func_index,
wasm::WasmName("module"),
- module_env_->GetName(function_));
+ module_env_->wire_bytes.GetName(function_));
}
if (FLAG_trace_wasm_decode_time) {
- double compile_ms = compile_timer.Elapsed().InMillisecondsF();
- PrintF("wasm-code-generation ok: %d bytes, %0.3f ms code generation\n",
- static_cast<int>(function_->code_end_offset -
- function_->code_start_offset),
- compile_ms);
+ double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
+ PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
+ function_->code_end_offset - function_->code_start_offset,
+ codegen_ms);
}
- Handle<FixedArray> protected_instructions = PackProtectedInstructions();
- code->set_protected_instructions(*protected_instructions);
-
return code;
}
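// The trap-data packing that used to happen here is deleted below;
// protected-instruction bookkeeping now lives in the code generator (see
// the x64 RecordProtectedInstructionLanding change later in this diff), so
// FinishCompilation no longer attaches a FixedArray of trap offsets to the
// code object.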
-Handle<FixedArray> WasmCompilationUnit::PackProtectedInstructions() const {
- const int num_instructions = static_cast<int>(protected_instructions_.size());
- Handle<FixedArray> fn_protected = isolate_->factory()->NewFixedArray(
- num_instructions * Code::kTrapDataSize, TENURED);
- for (unsigned i = 0; i < protected_instructions_.size(); ++i) {
- const trap_handler::ProtectedInstructionData& instruction =
- protected_instructions_[i];
- fn_protected->set(Code::kTrapDataSize * i + Code::kTrapCodeOffset,
- Smi::FromInt(instruction.instr_offset));
- fn_protected->set(Code::kTrapDataSize * i + Code::kTrapLandingOffset,
- Smi::FromInt(instruction.landing_offset));
- }
- return fn_protected;
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index a1bad1f0e5..706c386f5e 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -68,12 +68,14 @@ class WasmCompilationUnit final {
private:
SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
- Handle<FixedArray> PackProtectedInstructions() const;
+ char* GetTaggedFunctionName(const wasm::WasmFunction* function);
wasm::ErrorThrower* thrower_;
Isolate* isolate_;
wasm::ModuleBytesEnv* module_env_;
const wasm::WasmFunction* function_;
+  // The function name is tagged with the uint32 func_index: wasm#<func_index>.
+ char function_name_[16];
// The graph zone is deallocated at the end of ExecuteCompilation.
std::unique_ptr<Zone> graph_zone_;
JSGraph* jsgraph_;
@@ -233,6 +235,12 @@ class WasmGraphBuilder {
Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
const NodeVector& inputs);
+ Node* SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
+ const NodeVector& inputs);
+
+ Node* SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
+ const NodeVector& inputs);
+
bool has_simd() const { return has_simd_; }
wasm::ModuleEnv* module_env() const { return module_; }
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index c4acfb3672..01c1b860df 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -5,6 +5,7 @@
#include "src/assembler.h"
#include "src/base/lazy-instance.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/wasm/wasm-module.h"
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index cd4eeedf10..ae33e8c4b7 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -11,6 +11,7 @@
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/heap/heap-inl.h"
#include "src/wasm/wasm-module.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -280,9 +281,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
// TODO(eholk): Refactor this method to take the code generator as a
// parameter.
void Generate() final {
- int current_pc = __ pc_offset();
-
- gen_->AddProtectedInstruction(pc_, current_pc);
+ __ RecordProtectedInstructionLanding(pc_);
if (frame_elided_) {
__ EnterFrame(StackFrame::WASM_COMPILED);
@@ -726,8 +725,8 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
Label done;
// Check if current frame is an arguments adaptor frame.
- __ Cmp(Operand(rbp, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
+ Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &done, Label::kNear);
// Load arguments count from current arguments adaptor frame (note, it
@@ -930,10 +929,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- Deoptimizer::BailoutType bailout_type =
- Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result = AssembleDeoptimizerCall(
- deopt_state_id, bailout_type, current_source_position_);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -2099,30 +2096,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ incl(i.OutputRegister());
break;
case kX64Push:
- if (HasImmediateInput(instr, 0)) {
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ pushq(operand);
+ frame_access_state()->IncreaseSPDelta(1);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ } else if (HasImmediateInput(instr, 0)) {
__ pushq(i.InputImmediate(0));
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kPointerSize);
+ } else if (instr->InputAt(0)->IsRegister()) {
+ __ pushq(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ } else if (instr->InputAt(0)->IsFPRegister()) {
+ // TODO(titzer): use another machine instruction?
+ __ subq(rsp, Immediate(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kDoubleSize);
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
} else {
- if (instr->InputAt(0)->IsRegister()) {
- __ pushq(i.InputRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- } else if (instr->InputAt(0)->IsFPRegister()) {
- // TODO(titzer): use another machine instruction?
- __ subq(rsp, Immediate(kDoubleSize));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kDoubleSize);
- __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
- } else {
- __ pushq(i.InputOperand(0));
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- }
+ __ pushq(i.InputOperand(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
}
break;
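      // The kX64Push rewrite above flattens the old nested if/else into a
      // single ladder and adds a leading memory-operand path: now that
      // kX64Push is listed in CanBeMemoryOperand (see the
      // instruction-selector change below), a push whose input is a matched
      // load can emit pushq [mem] directly instead of first materializing
      // the value in a register.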
case kX64Poke: {
@@ -2316,8 +2318,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
void Generate() final {
X64OperandConverter i(gen_, instr_);
- Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
- i.InputInt32(instr_->InputCount() - 1));
+ Builtins::Name trap_id =
+ static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
bool old_has_frame = __ has_frame();
if (frame_elided_) {
__ set_has_frame(true);
@@ -2327,29 +2329,31 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
if (frame_elided_) {
__ set_has_frame(old_has_frame);
}
- if (FLAG_debug_code) {
- __ ud2();
- }
}
private:
- void GenerateCallToTrap(Runtime::FunctionId trap_id) {
- if (trap_id == Runtime::kNumFunctions) {
+ void GenerateCallToTrap(Builtins::Name trap_id) {
+ if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ PrepareCallCFunction(0);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
} else {
- __ Move(rsi, isolate()->native_context());
gen_->AssembleSourcePosition(instr_);
- __ CallRuntime(trap_id);
+ __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ RelocInfo::CODE_TARGET);
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ ud2();
+ }
}
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
}
bool frame_elided_;
@@ -2423,13 +2427,16 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type,
- SourcePosition pos) {
+ int deoptimization_id, SourcePosition pos) {
+ DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ Deoptimizer::BailoutType bailout_type =
+ deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+ : Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
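// With the BailoutType parameter dropped from the signature, the bailout
// type is now derived locally: DeoptimizeKind::kSoft maps to
// Deoptimizer::SOFT and any other kind to Deoptimizer::EAGER. The x87
// AssembleDeoptimizerCall further down receives the identical rewrite.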
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 4c213793f7..7abdd9096c 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -58,6 +58,7 @@ class X64OperandGenerator final : public OperandGenerator {
MachineRepresentation rep =
LoadRepresentationOf(input->op()).representation();
switch (opcode) {
+ case kX64Push:
case kX64Cmp:
case kX64Test:
return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
@@ -219,6 +220,9 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
opcode = kX64Movq;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
break;
@@ -251,6 +255,9 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
return kX64Movq;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return kArchNop;
@@ -335,6 +342,11 @@ void InstructionSelector::VisitStore(Node* node) {
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
+ if ((ElementSizeLog2Of(store_rep.representation()) < kPointerSizeLog2) &&
+ (value->opcode() == IrOpcode::kTruncateInt64ToInt32) &&
+ CanCover(node, value)) {
+ value = value->InputAt(0);
+ }
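+  // (Skipping the TruncateInt64ToInt32 is safe here because a store
+  // narrower than the word size only consumes the low bits of the value,
+  // and CanCover ensures the truncation is only observed by this store, so
+  // it can be elided without affecting other users.)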
InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
@@ -398,6 +410,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
@@ -453,6 +468,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
@@ -536,7 +554,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -830,31 +848,6 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
VisitWord64Shift(this, node, kX64Ror);
}
-
-void InstructionSelector::VisitWord64Clz(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord64Ctz(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord32Ctz(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64Tzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
@@ -864,18 +857,6 @@ void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32Popcnt(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord64Popcnt(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitInt32Add(Node* node) {
X64OperandGenerator g(this);
@@ -1098,55 +1079,6 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
VisitMulHigh(this, node, kX64UmulHigh32);
}
-
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat64ToUint32 | MiscField::encode(1), g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat64ToUint32 | MiscField::encode(0), g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
X64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1364,16 +1296,65 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
} // namespace
-
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- VisitRO(this, node, kSSEFloat64ToFloat32);
-}
+#define RO_OP_LIST(V) \
+ V(Word64Clz, kX64Lzcnt) \
+ V(Word32Clz, kX64Lzcnt32) \
+ V(Word64Ctz, kX64Tzcnt) \
+ V(Word32Ctz, kX64Tzcnt32) \
+ V(Word64Popcnt, kX64Popcnt) \
+ V(Word32Popcnt, kX64Popcnt32) \
+ V(Float64Sqrt, kSSEFloat64Sqrt) \
+ V(Float32Sqrt, kSSEFloat32Sqrt) \
+ V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
+ V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1)) \
+ V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
+ V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
+ V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
+ V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
+ V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
+ V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
+ V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
+ V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
+ V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
+ V(RoundInt64ToFloat32, kSSEInt64ToFloat32) \
+ V(RoundInt64ToFloat64, kSSEInt64ToFloat64) \
+ V(RoundUint32ToFloat32, kSSEUint32ToFloat32) \
+ V(BitcastFloat32ToInt32, kX64BitcastFI) \
+ V(BitcastFloat64ToInt64, kX64BitcastDL) \
+ V(BitcastInt32ToFloat32, kX64BitcastIF) \
+ V(BitcastInt64ToFloat64, kX64BitcastLD) \
+ V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
+ V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
+
+#define RR_OP_LIST(V) \
+ V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \
+ V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown)) \
+ V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp)) \
+ V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp)) \
+ V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
+ V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
+ V(Float32RoundTiesEven, \
+ kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
+ V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
+
+#define RO_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRO(this, node, opcode); \
+ }
+RO_OP_LIST(RO_VISITOR)
+#undef RO_VISITOR
+
+#define RR_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, node, opcode); \
+ }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
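+
+// RO_OP_LIST/RR_OP_LIST form a classic X-macro table: each V(Name, opcode)
+// entry is expanded by the visitor macro into one method, which is why the
+// dozens of hand-written visitors removed throughout this file collapse
+// into the two lists above. For instance V(Word32Clz, kX64Lzcnt32) expands
+// to:
+//
+//   void InstructionSelector::VisitWord32Clz(Node* node) {
+//     VisitRO(this, node, kX64Lzcnt32);
+//   }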
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
VisitRR(this, node, kArchTruncateDoubleToI);
}
-
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -1399,34 +1380,6 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
- VisitRO(this, node, kSSEFloat64ToInt32);
-}
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEInt32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEInt64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
@@ -1442,31 +1395,6 @@ void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
arraysize(temps), temps);
}
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
}
@@ -1491,10 +1419,6 @@ void InstructionSelector::VisitFloat32Abs(Node* node) {
}
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- VisitRO(this, node, kSSEFloat32Sqrt);
-}
-
void InstructionSelector::VisitFloat32Max(Node* node) {
VisitRRO(this, node, kSSEFloat32Max);
}
@@ -1545,55 +1469,12 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- VisitRO(this, node, kSSEFloat64Sqrt);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
- VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
- VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
- VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
- VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
- VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
-}
-
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
UNREACHABLE();
}
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
- VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
- VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
-}
-
void InstructionSelector::VisitFloat32Neg(Node* node) {
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
}
@@ -1641,17 +1522,29 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Push any stack arguments.
+ int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) {
- // TODO(titzer): X64Push cannot handle stack->stack double moves
- // because there is no way to encode fixed double slots.
- InstructionOperand value =
- g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
- : IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input.node()))
- ? g.UseRegister(input.node())
- : g.Use(input.node());
- Emit(kX64Push, g.NoOutput(), value);
+ Node* input_node = input.node();
+ if (g.CanBeImmediate(input_node)) {
+ Emit(kX64Push, g.NoOutput(), g.UseImmediate(input_node));
+ } else if (IsSupported(ATOM) ||
+ sequence()->IsFP(GetVirtualRegister(input_node))) {
+ // TODO(titzer): X64Push cannot handle stack->stack double moves
+ // because there is no way to encode fixed double slots.
+ Emit(kX64Push, g.NoOutput(), g.UseRegister(input_node));
+ } else if (g.CanBeMemoryOperand(kX64Push, node, input_node,
+ effect_level)) {
+ InstructionOperand outputs[1];
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ InstructionCode opcode = kX64Push;
+ AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+ input_node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 0, outputs, input_count, inputs);
+ } else {
+ Emit(kX64Push, g.NoOutput(), g.Use(input_node));
+ }
}
}
}
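// The CanBeMemoryOperand branch above is what makes the pushq [mem] path
// in the code generator reachable. The effect-level check is (assumed
// here) the usual selector invariant that no effectful node may sit
// between the load and this use, which keeps folding the load into the
// push safe; the addressing mode itself is folded into the opcode via
// AddressingModeField::encode before emission.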
@@ -1683,7 +1576,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
@@ -1704,8 +1597,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1892,8 +1785,8 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
+ cont->reason(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
} else {
@@ -2071,12 +1964,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
break;
case IrOpcode::kInt32Sub:
return VisitWordCompare(selector, value, kX64Cmp32, cont);
- case IrOpcode::kInt64Sub:
- return VisitWord64Compare(selector, value, cont);
case IrOpcode::kWord32And:
return VisitWordCompare(selector, value, kX64Test32, cont);
- case IrOpcode::kWord64And:
- return VisitWordCompare(selector, value, kX64Test, cont);
default:
break;
}
@@ -2095,14 +1984,16 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2155,32 +2046,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(user);
if (m.right().Is(0)) {
- Node* value = m.left().node();
-
- // Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- }
-
- // Try to combine the branch with a comparison.
- if (CanCover(user, value)) {
- switch (value->opcode()) {
- case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, value, kX64Cmp32, &cont);
- case IrOpcode::kWord32And:
- return VisitWordCompare(this, value, kX64Test32, &cont);
- default:
- break;
- }
- }
- return VisitCompareZero(this, value, kX64Cmp32, &cont);
+ return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
}
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
@@ -2333,21 +2199,6 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
VisitFloat64Compare(this, node, &cont);
}
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
X64OperandGenerator g(this);
Node* left = node->InputAt(0);
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index 5d8594c92b..fc5992a9c1 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -736,10 +736,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ fild_s(MemOperand(esp, 0));
__ lea(esp, Operand(esp, kPointerSize));
- Deoptimizer::BailoutType bailout_type =
- Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result = AssembleDeoptimizerCall(
- deopt_state_id, bailout_type, current_source_position_);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -994,10 +992,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ add(i.OutputRegister(0), i.InputRegister(2));
}
- __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
__ Move(i.OutputRegister(1), i.InputRegister(1));
}
+ __ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
if (use_temp) {
__ Move(i.OutputRegister(0), i.TempRegister(0));
}
@@ -1019,10 +1017,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ sub(i.OutputRegister(0), i.InputRegister(2));
}
- __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
__ Move(i.OutputRegister(1), i.InputRegister(1));
}
+ __ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
if (use_temp) {
__ Move(i.OutputRegister(0), i.TempRegister(0));
}
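// (The adc and sbb hunks above fix the same latent bug: the high-word
// add-with-carry / subtract-with-borrow used to target InputRegister(1),
// clobbering a register the allocator may still treat as live. Writing
// into OutputRegister(1) after the Move avoids that, and since Move emits
// a plain mov, which does not touch EFLAGS, the carry/borrow produced by
// the low-word op survives the reordering.)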
@@ -2028,69 +2026,74 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
return kSuccess;
} // NOLINT(readability/fn_size)
-
-// Assembles a branch after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- X87OperandConverter i(this, instr);
- Label::Distance flabel_distance =
- branch->fallthru ? Label::kNear : Label::kFar;
-
- Label done;
- Label tlabel_tmp;
- Label flabel_tmp;
- Label* tlabel = &tlabel_tmp;
- Label* flabel = &flabel_tmp;
-
- Label* tlabel_dst = branch->true_label;
- Label* flabel_dst = branch->false_label;
-
- switch (branch->condition) {
+static Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
case kUnorderedEqual:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kEqual:
- __ j(equal, tlabel);
+ return equal;
break;
case kUnorderedNotEqual:
- __ j(parity_even, tlabel);
- // Fall through.
case kNotEqual:
- __ j(not_equal, tlabel);
+ return not_equal;
break;
case kSignedLessThan:
- __ j(less, tlabel);
+ return less;
break;
case kSignedGreaterThanOrEqual:
- __ j(greater_equal, tlabel);
+ return greater_equal;
break;
case kSignedLessThanOrEqual:
- __ j(less_equal, tlabel);
+ return less_equal;
break;
case kSignedGreaterThan:
- __ j(greater, tlabel);
+ return greater;
break;
case kUnsignedLessThan:
- __ j(below, tlabel);
+ return below;
break;
case kUnsignedGreaterThanOrEqual:
- __ j(above_equal, tlabel);
+ return above_equal;
break;
case kUnsignedLessThanOrEqual:
- __ j(below_equal, tlabel);
+ return below_equal;
break;
case kUnsignedGreaterThan:
- __ j(above, tlabel);
+ return above;
break;
case kOverflow:
- __ j(overflow, tlabel);
+ return overflow;
break;
case kNotOverflow:
- __ j(no_overflow, tlabel);
+ return no_overflow;
break;
default:
UNREACHABLE();
+ return no_condition;
break;
}
+}
+
+// Assembles a branch after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label::Distance flabel_distance =
+ branch->fallthru ? Label::kNear : Label::kFar;
+
+ Label done;
+ Label tlabel_tmp;
+ Label flabel_tmp;
+ Label* tlabel = &tlabel_tmp;
+ Label* flabel = &flabel_tmp;
+
+ Label* tlabel_dst = branch->true_label;
+ Label* flabel_dst = branch->false_label;
+
+ if (branch->condition == kUnorderedEqual) {
+ __ j(parity_even, flabel, flabel_distance);
+ } else if (branch->condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
// Add a jump if not falling through to the next block.
if (!branch->fallthru) __ jmp(flabel);
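// The refactored branch logic handles the two unordered conditions with an
// explicit parity test up front (on x86, a floating-point compare that
// sees a NaN sets PF), then falls through to a single table-driven jump;
// e.g. kUnorderedEqual now assembles as "jp flabel; je tlabel" instead of
// the old per-case fallthrough switch.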
@@ -2130,7 +2133,68 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
- UNREACHABLE();
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ X87OperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ ud2();
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ __ PrepareCallCFunction(0, esi);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(esi, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Label end;
+ if (condition == kUnorderedEqual) {
+ __ j(parity_even, &end);
+ } else if (condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(condition), tlabel);
+ __ bind(&end);
}
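// Note the asymmetry with the x64 trap path earlier in this diff: x64 now
// calls trap builtins (Builtins::Name) and records a safepoint after the
// call, while this newly added x87 implementation still routes traps
// through Runtime::FunctionId and CallRuntime; presumably the x87 port had
// simply not been migrated yet.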
// Assembles boolean materializations after an instruction.
@@ -2144,58 +2208,17 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
Label check;
DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cc = no_condition;
- switch (condition) {
- case kUnorderedEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
- case kEqual:
- cc = equal;
- break;
- case kUnorderedNotEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ mov(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
- case kNotEqual:
- cc = not_equal;
- break;
- case kSignedLessThan:
- cc = less;
- break;
- case kSignedGreaterThanOrEqual:
- cc = greater_equal;
- break;
- case kSignedLessThanOrEqual:
- cc = less_equal;
- break;
- case kSignedGreaterThan:
- cc = greater;
- break;
- case kUnsignedLessThan:
- cc = below;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = above_equal;
- break;
- case kUnsignedLessThanOrEqual:
- cc = below_equal;
- break;
- case kUnsignedGreaterThan:
- cc = above;
- break;
- case kOverflow:
- cc = overflow;
- break;
- case kNotOverflow:
- cc = no_overflow;
- break;
- default:
- UNREACHABLE();
- break;
+ if (condition == kUnorderedEqual) {
+ __ j(parity_odd, &check, Label::kNear);
+ __ Move(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ } else if (condition == kUnorderedNotEqual) {
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
}
+ Condition cc = FlagsConditionToCondition(condition);
+
__ bind(&check);
if (reg.is_byte_register()) {
// setcc for byte registers (al, bl, cl, dl).
@@ -2240,13 +2263,16 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type,
- SourcePosition pos) {
+ int deoptimization_id, SourcePosition pos) {
+ DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ Deoptimizer::BailoutType bailout_type =
+ deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+ : Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
@@ -2562,7 +2588,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Move(dst, g.ToImmediate(source));
} else if (src_constant.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
- uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
+ uint32_t src = src_constant.ToFloat32AsInt();
if (destination->IsFPRegister()) {
__ sub(esp, Immediate(kInt32Size));
__ mov(MemOperand(esp, 0), Immediate(src));
@@ -2577,7 +2603,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+ uint64_t src = src_constant.ToFloat64AsInt();
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
if (destination->IsFPRegister()) {
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index 9f9e4264a7..ede0d45ce0 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -195,6 +195,9 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -285,6 +288,9 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -352,6 +358,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -401,6 +410,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd1x4: // Fall through.
+ case MachineRepresentation::kSimd1x8: // Fall through.
+ case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -481,7 +493,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1223,11 +1235,14 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ cont->kind(), cont->reason(), cont->frame_state());
+ } else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
+ } else {
+ DCHECK(cont->IsTrap());
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
}
@@ -1241,11 +1256,14 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
- cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+ cont->reason(), cont->frame_state());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1261,21 +1279,54 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}
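+// Infers the machine type to use when narrowing a comparison on |node|,
+// given |hint_node| as the other operand: if the hint is a load and |node|
+// is a constant that fits the loaded representation, the load's type is
+// used; otherwise |node|'s own load representation, or None.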
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+ if (hint_node->opcode() == IrOpcode::kLoad) {
+ MachineType hint = LoadRepresentationOf(hint_node->op());
+ if (node->opcode() == IrOpcode::kInt32Constant ||
+ node->opcode() == IrOpcode::kInt64Constant) {
+ int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+ ? OpParameter<int32_t>(node)
+ : OpParameter<int64_t>(node);
+ if (hint == MachineType::Int8()) {
+ if (constant >= std::numeric_limits<int8_t>::min() &&
+ constant <= std::numeric_limits<int8_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Uint8()) {
+ if (constant >= std::numeric_limits<uint8_t>::min() &&
+ constant <= std::numeric_limits<uint8_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Int16()) {
+ if (constant >= std::numeric_limits<int16_t>::min() &&
+ constant <= std::numeric_limits<int16_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Uint16()) {
+ if (constant >= std::numeric_limits<uint16_t>::min() &&
+ constant <= std::numeric_limits<uint16_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Int32()) {
+ return hint;
+ } else if (hint == MachineType::Uint32()) {
+ if (constant >= 0) return hint;
+ }
+ }
+ }
+ return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+ : MachineType::None();
+}
+
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
Node* right, FlagsContinuation* cont) {
- // Currently, if one of the two operands is not a Load, we don't know what its
- // machine representation is, so we bail out.
- // TODO(epertoso): we can probably get some size information out of immediates
- // and phi nodes.
- if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
- return opcode;
- }
+ // TODO(epertoso): we can probably get some size information out of phi nodes.
// If the load representations don't match, both operands will be
// zero/sign-extended to 32 bits.
- MachineType left_type = LoadRepresentationOf(left->op());
- MachineType right_type = LoadRepresentationOf(right->op());
+ MachineType left_type = MachineTypeForNarrow(left, right);
+ MachineType right_type = MachineTypeForNarrow(right, left);
if (left_type == right_type) {
switch (left_type.representation()) {
case MachineRepresentation::kBit:
@@ -1321,11 +1372,14 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
- cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ cont->kind(), cont->reason(), cont->frame_state());
+ } else if (cont->IsSet()) {
selector->Emit(cont->Encode(kX87Float32Cmp),
g.DefineAsByteRegister(cont->result()));
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1342,11 +1396,14 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
- cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ cont->kind(), cont->reason(), cont->frame_state());
+ } else if (cont->IsSet()) {
selector->Emit(cont->Encode(kX87Float64Cmp),
g.DefineAsByteRegister(cont->result()));
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1377,10 +1434,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
- // TODO(epertoso): we should use `narrowed_opcode' here once we match
- // immediates too.
- return VisitCompareWithMemoryOperand(selector, opcode, left,
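+ // Immediates are narrowed by MachineTypeForNarrow as well, so the
+ // narrowed opcode is now safe to use for the memory-operand form.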
+ if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
+ return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
g.UseImmediate(right), cont);
}
return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
@@ -1422,8 +1477,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
- cont->frame_state());
+ selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
+ cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1536,24 +1591,30 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
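+// The trap visitors now build a real trap continuation (they used to be
+// UNREACHABLE()): the guarded compare branches to a trap carrying the given
+// runtime function id.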
void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- UNREACHABLE();
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitTrapUnless(Node* node,
Runtime::FunctionId func_id) {
- UNREACHABLE();
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index a51b31257d..1c7392ff97 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -7,6 +7,7 @@
#include "src/contexts.h"
#include "src/objects-inl.h"
+#include "src/objects/regexp-match-info.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 47ffb275b4..e622807b81 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -412,9 +412,8 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
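+// Layout of an osr_code_table entry: {SharedFunctionInfo, Code, osr ast id}.
+// The literals slot is gone because literals now live on the feedback vector.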
static const int kSharedOffset = 0;
static const int kCachedCodeOffset = 1;
-static const int kLiteralsOffset = 2;
-static const int kOsrAstIdOffset = 3;
-static const int kEntryLength = 4;
+static const int kOsrAstIdOffset = 2;
+static const int kEntryLength = 3;
static const int kInitialLength = kEntryLength;
int Context::SearchOptimizedCodeMapEntry(SharedFunctionInfo* shared,
@@ -436,38 +435,28 @@ int Context::SearchOptimizedCodeMapEntry(SharedFunctionInfo* shared,
return -1;
}
-void Context::SearchOptimizedCodeMap(SharedFunctionInfo* shared,
- BailoutId osr_ast_id, Code** pcode,
- LiteralsArray** pliterals) {
+Code* Context::SearchOptimizedCodeMap(SharedFunctionInfo* shared,
+ BailoutId osr_ast_id) {
DCHECK(this->IsNativeContext());
int entry = SearchOptimizedCodeMapEntry(shared, osr_ast_id);
if (entry != -1) {
FixedArray* code_map = osr_code_table();
DCHECK_LE(entry + kEntryLength, code_map->length());
WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
- WeakCell* literals_cell =
- WeakCell::cast(code_map->get(entry + kLiteralsOffset));
-
- *pcode = cell->cleared() ? nullptr : Code::cast(cell->value());
- *pliterals = literals_cell->cleared()
- ? nullptr
- : LiteralsArray::cast(literals_cell->value());
- } else {
- *pcode = nullptr;
- *pliterals = nullptr;
+ return cell->cleared() ? nullptr : Code::cast(cell->value());
}
+ return nullptr;
}
void Context::AddToOptimizedCodeMap(Handle<Context> native_context,
Handle<SharedFunctionInfo> shared,
Handle<Code> code,
- Handle<LiteralsArray> literals,
BailoutId osr_ast_id) {
DCHECK(native_context->IsNativeContext());
Isolate* isolate = native_context->GetIsolate();
if (isolate->serializer_enabled()) return;
- STATIC_ASSERT(kEntryLength == 4);
+ STATIC_ASSERT(kEntryLength == 3);
Handle<FixedArray> new_code_map;
int entry;
@@ -478,12 +467,9 @@ void Context::AddToOptimizedCodeMap(Handle<Context> native_context,
Handle<FixedArray> old_code_map(native_context->osr_code_table(), isolate);
entry = native_context->SearchOptimizedCodeMapEntry(*shared, osr_ast_id);
if (entry >= 0) {
- // Just set the code and literals of the entry.
+ // Just set the code of the entry.
Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
old_code_map->set(entry + kCachedCodeOffset, *code_cell);
- Handle<WeakCell> literals_cell =
- isolate->factory()->NewWeakCell(literals);
- old_code_map->set(entry + kLiteralsOffset, *literals_cell);
return;
}
@@ -507,12 +493,10 @@ void Context::AddToOptimizedCodeMap(Handle<Context> native_context,
}
Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
- Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
Handle<WeakCell> shared_cell = isolate->factory()->NewWeakCell(shared);
new_code_map->set(entry + kSharedOffset, *shared_cell);
new_code_map->set(entry + kCachedCodeOffset, *code_cell);
- new_code_map->set(entry + kLiteralsOffset, *literals_cell);
new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
#ifdef DEBUG
@@ -523,8 +507,6 @@ void Context::AddToOptimizedCodeMap(Handle<Context> native_context,
DCHECK(cell->cleared() ||
(cell->value()->IsCode() &&
Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
- cell = WeakCell::cast(new_code_map->get(i + kLiteralsOffset));
- DCHECK(cell->cleared() || cell->value()->IsFixedArray());
DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
}
#endif
@@ -565,8 +547,6 @@ void Context::EvictFromOptimizedCodeMap(Code* optimized_code,
code_map->set(dst + kSharedOffset, code_map->get(src + kSharedOffset));
code_map->set(dst + kCachedCodeOffset,
code_map->get(src + kCachedCodeOffset));
- code_map->set(dst + kLiteralsOffset,
- code_map->get(src + kLiteralsOffset));
code_map->set(dst + kOsrAstIdOffset,
code_map->get(src + kOsrAstIdOffset));
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index e419913f90..7f9646b98f 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -11,6 +11,7 @@
namespace v8 {
namespace internal {
+class RegExpMatchInfo;
enum ContextLookupFlags {
FOLLOW_CONTEXT_CHAIN = 1 << 0,
@@ -35,6 +36,14 @@ enum ContextLookupFlags {
// Factory::NewContext.
#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction, \
+ async_function_await_caught) \
+ V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction, \
+ async_function_await_uncaught) \
+ V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
+ async_function_promise_create) \
+ V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
+ async_function_promise_release) \
V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
@@ -66,7 +75,6 @@ enum ContextLookupFlags {
promise_internal_constructor) \
V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject) \
V(IS_PROMISE_INDEX, JSFunction, is_promise) \
- V(PERFORM_PROMISE_THEN_INDEX, JSFunction, perform_promise_then) \
V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle) \
@@ -80,15 +88,10 @@ enum ContextLookupFlags {
V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
+ V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
+ V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
+ V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction, \
- async_function_await_caught) \
- V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction, \
- async_function_await_uncaught) \
- V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
- async_function_promise_create) \
- V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
- async_function_promise_release) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
V(ERROR_TO_STRING, JSFunction, error_to_string) \
@@ -104,11 +107,7 @@ enum ContextLookupFlags {
V(OBJECT_TO_STRING, JSFunction, object_to_string) \
V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
- V(PROMISE_ID_RESOLVE_HANDLER_INDEX, JSFunction, promise_id_resolve_handler) \
- V(PROMISE_ID_REJECT_HANDLER_INDEX, JSFunction, promise_id_reject_handler) \
V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
- V(REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, JSFunction, \
- reject_promise_no_debug_event) \
V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
@@ -198,10 +197,14 @@ enum ContextLookupFlags {
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
+ V(ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX, Map, async_from_sync_iterator_map) \
+ V(ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \
+ async_function_await_reject_shared_fun) \
+ V(ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
+ async_function_await_resolve_shared_fun) \
V(ASYNC_FUNCTION_FUNCTION_INDEX, JSFunction, async_function_constructor) \
- V(BOOL16X8_FUNCTION_INDEX, JSFunction, bool16x8_function) \
- V(BOOL32X4_FUNCTION_INDEX, JSFunction, bool32x4_function) \
- V(BOOL8X16_FUNCTION_INDEX, JSFunction, bool8x16_function) \
+ V(ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN, SharedFunctionInfo, \
+ async_iterator_value_unwrap_shared_fun) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
V(BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX, Map, \
bound_function_with_constructor_map) \
@@ -220,12 +223,11 @@ enum ContextLookupFlags {
error_message_for_code_gen_from_strings) \
V(ERRORS_THROWN_INDEX, Smi, errors_thrown) \
V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_binding_object) \
- V(EXTRAS_UTILS_OBJECT_INDEX, JSObject, extras_utils_object) \
+ V(EXTRAS_UTILS_OBJECT_INDEX, Object, extras_utils_object) \
V(FAST_ALIASED_ARGUMENTS_MAP_INDEX, Map, fast_aliased_arguments_map) \
V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, FixedArray, \
fast_template_instantiations_cache) \
V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
- V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function) \
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function) \
V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
@@ -240,11 +242,8 @@ enum ContextLookupFlags {
V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
- V(INT16X8_FUNCTION_INDEX, JSFunction, int16x8_function) \
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
- V(INT32X4_FUNCTION_INDEX, JSFunction, int32x4_function) \
V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
- V(INT8X16_FUNCTION_INDEX, JSFunction, int8x16_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
V(INTL_DATE_TIME_FORMAT_FUNCTION_INDEX, JSFunction, \
@@ -294,6 +293,14 @@ enum ContextLookupFlags {
V(PROMISE_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
promise_resolve_shared_fun) \
V(PROMISE_REJECT_SHARED_FUN, SharedFunctionInfo, promise_reject_shared_fun) \
+ V(PROMISE_THEN_FINALLY_SHARED_FUN, SharedFunctionInfo, \
+ promise_then_finally_shared_fun) \
+ V(PROMISE_CATCH_FINALLY_SHARED_FUN, SharedFunctionInfo, \
+ promise_catch_finally_shared_fun) \
+ V(PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN, SharedFunctionInfo, \
+ promise_value_thunk_finally_shared_fun) \
+ V(PROMISE_THROWER_FINALLY_SHARED_FUN, SharedFunctionInfo, \
+ promise_thrower_finally_shared_fun) \
V(PROMISE_PROTOTYPE_MAP_INDEX, Map, promise_prototype_map) \
V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -330,6 +337,7 @@ enum ContextLookupFlags {
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
+ V(NATIVE_FUNCTION_MAP_INDEX, Map, native_function_map) \
V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map) \
V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
V(WASM_INSTANCE_SYM_INDEX, Symbol, wasm_instance_sym) \
@@ -342,12 +350,10 @@ enum ContextLookupFlags {
V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \
V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \
V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
- V(UINT16X8_FUNCTION_INDEX, JSFunction, uint16x8_function) \
V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
- V(UINT32X4_FUNCTION_INDEX, JSFunction, uint32x4_function) \
V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
- V(UINT8X16_FUNCTION_INDEX, JSFunction, uint8x16_function) \
+ V(EXPORTS_CONTAINER, Object, exports_container) \
NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V)
@@ -572,15 +578,14 @@ class Context: public FixedArray {
// A native context keeps track of all OSR'd optimized functions.
inline bool OptimizedCodeMapIsCleared();
- void SearchOptimizedCodeMap(SharedFunctionInfo* shared, BailoutId osr_ast_id,
- Code** pcode, LiteralsArray** pliterals);
+ Code* SearchOptimizedCodeMap(SharedFunctionInfo* shared,
+ BailoutId osr_ast_id);
int SearchOptimizedCodeMapEntry(SharedFunctionInfo* shared,
BailoutId osr_ast_id);
static void AddToOptimizedCodeMap(Handle<Context> native_context,
Handle<SharedFunctionInfo> shared,
Handle<Code> code,
- Handle<LiteralsArray> literals,
BailoutId osr_ast_id);
// A native context holds a list of all functions with optimized code.
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 2d4aaa0692..d26274c220 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -15,6 +15,7 @@
#include "src/conversions-inl.h"
#include "src/dtoa.h"
#include "src/factory.h"
+#include "src/handles.h"
#include "src/list-inl.h"
#include "src/strtod.h"
#include "src/utils.h"
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index a408132fa8..4a54e70049 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -8,12 +8,13 @@
#include <limits>
#include "src/base/logging.h"
-#include "src/handles.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
+template <typename T>
+class Handle;
class UnicodeCache;
// Maximum number of significant digits in decimal representation.
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 4f3706d665..06a680c60d 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -546,7 +546,6 @@ class RuntimeCallTimer final {
V(Date_New) \
V(Date_NumberValue) \
V(Debug_Call) \
- V(Debug_GetMirror) \
V(Error_New) \
V(External_New) \
V(Float32Array_New) \
@@ -702,7 +701,7 @@ class RuntimeCallTimer final {
V(CompileScopeAnalysis) \
V(CompileScript) \
V(CompileSerialize) \
- V(CompilerDispatcher) \
+ V(CompileWaitForDispatcher) \
V(DeoptimizeCode) \
V(FunctionCallback) \
V(GC) \
@@ -942,6 +941,7 @@ class RuntimeCallTimerScope {
HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 21, 22) \
HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 21, 22) \
HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22) \
+ HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
/* Asm/Wasm. */ \
HR(wasm_functions_per_module, V8.WasmFunctionsPerModule, 1, 10000, 51)
@@ -983,6 +983,8 @@ class RuntimeCallTimerScope {
HT(wasm_compile_module_time, V8.WasmCompileModuleMicroSeconds, 1000000, \
MICROSECOND) \
HT(wasm_compile_function_time, V8.WasmCompileFunctionMicroSeconds, 1000000, \
+ MICROSECOND) \
+ HT(asm_wasm_translation_time, V8.AsmWasmTranslationMicroSeconds, 1000000, \
MICROSECOND)
#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
@@ -1146,10 +1148,6 @@ class RuntimeCallTimerScope {
SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed) \
- SC(turbo_escape_allocs_replaced, V8.TurboEscapeAllocsReplaced) \
- SC(crankshaft_escape_allocs_replaced, V8.CrankshaftEscapeAllocsReplaced) \
- SC(turbo_escape_loads_replaced, V8.TurboEscapeLoadsReplaced) \
- SC(crankshaft_escape_loads_replaced, V8.CrankshaftEscapeLoadsReplaced) \
/* Total code size (including metadata) of baseline code or bytecode. */ \
SC(total_baseline_code_size, V8.TotalBaselineCodeSize) \
/* Total count of functions compiled using the baseline compiler. */ \
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index 2fb746dcbd..4783808ba7 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -270,7 +270,7 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ Move(scratch0(), Smi::FromInt(StackFrame::STUB));
+ __ mov(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
@@ -344,7 +344,7 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ mov(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ push(ip);
DCHECK(info()->IsStub());
}
@@ -688,7 +688,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
- __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
+ __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode, false);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
@@ -2130,12 +2130,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ b(eq, instr->TrueLabel(chunk_));
}
- if (expected & ToBooleanHint::kSimdValue) {
- // SIMD value -> true.
- __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
- __ b(eq, instr->TrueLabel(chunk_));
- }
-
if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
@@ -2941,7 +2935,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(result, MemOperand(scratch,
CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(result,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
@@ -3505,7 +3500,8 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
__ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(scratch3,
MemOperand(scratch2, StandardFrameConstants::kContextOffset));
- __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(scratch3,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &no_arguments_adaptor);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -4763,6 +4759,13 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Label deopt, done;
+ // If the map is not deprecated, the migration attempt does not make sense.
+ __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ ldr(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
+ __ tst(scratch0(), Operand(Map::Deprecated::kMask));
+ __ b(eq, &deopt);
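+ // tst sets the Z flag when the Deprecated bit is clear, so maps that are
+ // not deprecated deoptimize immediately instead of calling the runtime.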
+
{
PushSafepointRegistersScope scope(this);
__ push(object);
@@ -4773,7 +4776,12 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed);
+ __ b(ne, &done);
+
+ __ bind(&deopt);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed);
+
+ __ bind(&done);
}
@@ -5124,17 +5132,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
final_branch_condition = eq;
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(type_name, factory->type##_string())) { \
- __ JumpIfSmi(input, false_label); \
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
- __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
- final_branch_condition = eq;
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
-
} else {
__ b(false_label);
}
@@ -5231,6 +5228,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ masm()->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm());
predictable.ExpectSize(CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
DCHECK(instr->context()->IsRegister());
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 141ac3f610..8152924420 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -724,7 +724,7 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(info()->IsStub());
frame_is_built_ = true;
__ Push(lr, fp);
- __ Mov(fp, Smi::FromInt(StackFrame::STUB));
+ __ Mov(fp, StackFrame::TypeToMarker(StackFrame::STUB));
__ Push(fp);
__ Add(fp, __ StackPointer(),
TypedFrameConstants::kFixedFrameSizeFromFp);
@@ -803,7 +803,7 @@ bool LCodeGen::GenerateJumpTable() {
UseScratchRegisterScope temps(masm());
Register stub_marker = temps.AcquireX();
__ Bind(&needs_frame);
- __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
+ __ Mov(stub_marker, StackFrame::TypeToMarker(StackFrame::STUB));
__ Push(cp, stub_marker);
__ Add(fp, __ StackPointer(), 2 * kPointerSize);
}
@@ -1618,7 +1618,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(result, MemOperand(previous_fp,
CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Cmp(result, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ Csel(result, fp, previous_fp, ne);
} else {
__ Mov(result, fp);
@@ -1865,12 +1865,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ B(eq, true_label);
}
- if (expected & ToBooleanHint::kSimdValue) {
- // SIMD value -> true.
- __ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
- __ B(eq, true_label);
- }
-
if (expected & ToBooleanHint::kHeapNumber) {
Label not_heap_number;
__ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
@@ -2024,6 +2018,13 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
Register temp = ToRegister(instr->temp());
+ Label deopt, done;
+ // If the map is not deprecated, the migration attempt does not make sense.
+ __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ldr(temp, FieldMemOperand(temp, Map::kBitField3Offset));
+ __ Tst(temp, Operand(Map::Deprecated::kMask));
+ __ B(eq, &deopt);
+
{
PushSafepointRegistersScope scope(this);
__ Push(object);
@@ -2033,7 +2034,13 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr, DeoptimizeReason::kInstanceMigrationFailed);
+ __ Tst(temp, Operand(kSmiTagMask));
+ __ B(ne, &done);
+
+ __ bind(&deopt);
+ Deoptimize(instr, DeoptimizeReason::kInstanceMigrationFailed);
+
+ __ bind(&done);
}
@@ -2833,7 +2840,8 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
__ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(scratch3,
MemOperand(scratch2, StandardFrameConstants::kContextOffset));
- __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Cmp(scratch3,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ B(ne, &no_arguments_adaptor);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -5439,20 +5447,6 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
EmitTestAndBranch(instr, eq, scratch,
(1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(type_name, factory->type##_string())) { \
- DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); \
- Register map = ToRegister(instr->temp1()); \
- \
- __ JumpIfSmi(value, false_label); \
- __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); \
- __ CompareRoot(map, Heap::k##Type##MapRootIndex); \
- EmitBranch(instr, eq);
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
-
} else {
__ B(false_label);
}
diff --git a/deps/v8/src/crankshaft/compilation-phase.cc b/deps/v8/src/crankshaft/compilation-phase.cc
index 4be0b1a488..11300701b0 100644
--- a/deps/v8/src/crankshaft/compilation-phase.cc
+++ b/deps/v8/src/crankshaft/compilation-phase.cc
@@ -6,6 +6,7 @@
#include "src/crankshaft/hydrogen.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-bce.cc b/deps/v8/src/crankshaft/hydrogen-bce.cc
index 7910c5bdae..333fafbf13 100644
--- a/deps/v8/src/crankshaft/hydrogen-bce.cc
+++ b/deps/v8/src/crankshaft/hydrogen-bce.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-bce.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-canonicalize.cc b/deps/v8/src/crankshaft/hydrogen-canonicalize.cc
index 4a07357d58..20e771763f 100644
--- a/deps/v8/src/crankshaft/hydrogen-canonicalize.cc
+++ b/deps/v8/src/crankshaft/hydrogen-canonicalize.cc
@@ -4,7 +4,9 @@
#include "src/crankshaft/hydrogen-canonicalize.h"
+#include "src/counters.h"
#include "src/crankshaft/hydrogen-redundant-phi.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-check-elimination.cc b/deps/v8/src/crankshaft/hydrogen-check-elimination.cc
index 548e4cd8bd..951628e3bb 100644
--- a/deps/v8/src/crankshaft/hydrogen-check-elimination.cc
+++ b/deps/v8/src/crankshaft/hydrogen-check-elimination.cc
@@ -6,6 +6,7 @@
#include "src/crankshaft/hydrogen-alias-analysis.h"
#include "src/crankshaft/hydrogen-flow-engine.h"
+#include "src/objects-inl.h"
#define GLOBAL 1
diff --git a/deps/v8/src/crankshaft/hydrogen-dce.cc b/deps/v8/src/crankshaft/hydrogen-dce.cc
index 3cb9cf4a07..60b41cda76 100644
--- a/deps/v8/src/crankshaft/hydrogen-dce.cc
+++ b/deps/v8/src/crankshaft/hydrogen-dce.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-dce.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-dehoist.cc b/deps/v8/src/crankshaft/hydrogen-dehoist.cc
index 34de94afc5..0fccecc4d3 100644
--- a/deps/v8/src/crankshaft/hydrogen-dehoist.cc
+++ b/deps/v8/src/crankshaft/hydrogen-dehoist.cc
@@ -5,6 +5,7 @@
#include "src/crankshaft/hydrogen-dehoist.h"
#include "src/base/safe_math.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
index 7965a9432a..e1eb11692f 100644
--- a/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
+++ b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
@@ -4,7 +4,7 @@
#include "src/crankshaft/hydrogen-environment-liveness.h"
-
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc b/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc
index ab3bff2edc..91b4ff2b67 100644
--- a/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc
+++ b/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-escape-analysis.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -142,7 +143,6 @@ HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
// necessary.
HValue* HEscapeAnalysisPhase::NewLoadReplacement(
HLoadNamedField* load, HValue* load_value) {
- isolate()->counters()->crankshaft_escape_loads_replaced()->Increment();
HValue* replacement = load_value;
Representation representation = load->representation();
if (representation.IsSmiOrInteger32() || representation.IsDouble()) {
@@ -320,8 +320,6 @@ void HEscapeAnalysisPhase::Run() {
for (int i = 0; i < max_fixpoint_iteration_count; i++) {
CollectCapturedValues();
if (captured_.is_empty()) break;
- isolate()->counters()->crankshaft_escape_allocs_replaced()->Increment(
- captured_.length());
PerformScalarReplacement();
captured_.Rewind(0);
}
diff --git a/deps/v8/src/crankshaft/hydrogen-gvn.cc b/deps/v8/src/crankshaft/hydrogen-gvn.cc
index bf51bad4bb..70320052b0 100644
--- a/deps/v8/src/crankshaft/hydrogen-gvn.cc
+++ b/deps/v8/src/crankshaft/hydrogen-gvn.cc
@@ -5,8 +5,7 @@
#include "src/crankshaft/hydrogen-gvn.h"
#include "src/crankshaft/hydrogen.h"
-#include "src/list.h"
-#include "src/list-inl.h"
+#include "src/objects-inl.h"
#include "src/v8.h"
namespace v8 {
@@ -652,23 +651,19 @@ SideEffects
HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
SideEffects side_effects;
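+ // The explicit worklist is gone: predecessors strictly between |dominator|
+ // and |dominated| are now walked recursively, with visited_on_paths_
+ // ensuring each block is processed only once.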
- List<HBasicBlock*> blocks;
- for (;;) {
- for (int i = 0; i < dominated->predecessors()->length(); ++i) {
- HBasicBlock* block = dominated->predecessors()->at(i);
- if (dominator->block_id() < block->block_id() &&
- block->block_id() < dominated->block_id() &&
- !visited_on_paths_.Contains(block->block_id())) {
- visited_on_paths_.Add(block->block_id());
- side_effects.Add(block_side_effects_[block->block_id()]);
- if (block->IsLoopHeader()) {
- side_effects.Add(loop_side_effects_[block->block_id()]);
- }
- blocks.Add(block);
+ for (int i = 0; i < dominated->predecessors()->length(); ++i) {
+ HBasicBlock* block = dominated->predecessors()->at(i);
+ if (dominator->block_id() < block->block_id() &&
+ block->block_id() < dominated->block_id() &&
+ !visited_on_paths_.Contains(block->block_id())) {
+ visited_on_paths_.Add(block->block_id());
+ side_effects.Add(block_side_effects_[block->block_id()]);
+ if (block->IsLoopHeader()) {
+ side_effects.Add(loop_side_effects_[block->block_id()]);
}
+ side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
+ dominator, block));
}
- if (blocks.is_empty()) break;
- dominated = blocks.RemoveLast();
}
return side_effects;
}
diff --git a/deps/v8/src/crankshaft/hydrogen-infer-representation.cc b/deps/v8/src/crankshaft/hydrogen-infer-representation.cc
index 74f264e17a..bbff24e5d1 100644
--- a/deps/v8/src/crankshaft/hydrogen-infer-representation.cc
+++ b/deps/v8/src/crankshaft/hydrogen-infer-representation.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-infer-representation.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-infer-types.cc b/deps/v8/src/crankshaft/hydrogen-infer-types.cc
index bfd3dd2281..a2fd72e443 100644
--- a/deps/v8/src/crankshaft/hydrogen-infer-types.cc
+++ b/deps/v8/src/crankshaft/hydrogen-infer-types.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-infer-types.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.cc b/deps/v8/src/crankshaft/hydrogen-instructions.cc
index be1ac9a18c..8cf49201d0 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.cc
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.cc
@@ -12,6 +12,7 @@
#include "src/double.h"
#include "src/elements.h"
#include "src/factory.h"
+#include "src/objects-inl.h"
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
@@ -1072,9 +1073,9 @@ std::ostream& HReturn::PrintDataTo(std::ostream& os) const { // NOLINT
Representation HBranch::observed_input_representation(int index) {
- if (expected_input_types_ & (ToBooleanHint::kNull | ToBooleanHint::kReceiver |
- ToBooleanHint::kString | ToBooleanHint::kSymbol |
- ToBooleanHint::kSimdValue)) {
+ if (expected_input_types_ &
+ (ToBooleanHint::kNull | ToBooleanHint::kReceiver |
+ ToBooleanHint::kString | ToBooleanHint::kSymbol)) {
return Representation::Tagged();
}
if (expected_input_types_ & ToBooleanHint::kUndefined) {
@@ -1244,17 +1245,6 @@ String* TypeOfString(HConstant* constant, Isolate* isolate) {
}
case SYMBOL_TYPE:
return heap->symbol_string();
- case SIMD128_VALUE_TYPE: {
- Unique<Map> map = constant->ObjectMap();
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- if (map.IsKnownGlobal(heap->type##_map())) { \
- return heap->type##_string(); \
- }
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- UNREACHABLE();
- return nullptr;
- }
default:
if (constant->IsUndetectable()) return heap->undefined_string();
if (constant->IsCallable()) return heap->function_string();
@@ -2177,6 +2167,11 @@ HConstant::HConstant(Special special)
InstanceTypeField::encode(kUnknownInstanceType)),
int32_value_(0) {
DCHECK_EQ(kHoleNaN, special);
+ // Manipulating the signaling NaN used for the hole in C++, e.g. with
+ // bit_cast, will change its value on ia32 (the x87 stack is used to return
+ // values and stores to the stack silently clear the signaling bit).
+ // Therefore we have to use memcpy for initializing |double_value_| with
+ // kHoleNanInt64 here.
std::memcpy(&double_value_, &kHoleNanInt64, sizeof(double_value_));
Initialize(Representation::Double());
}
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index b20bc9b53f..7059425cb0 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -3086,11 +3086,8 @@ class HConstant final : public HTemplateInstruction<0> {
return double_value_;
}
uint64_t DoubleValueAsBits() const {
- uint64_t bits;
DCHECK(HasDoubleValue());
- STATIC_ASSERT(sizeof(bits) == sizeof(double_value_));
- std::memcpy(&bits, &double_value_, sizeof(bits));
- return bits;
+ return bit_cast<uint64_t>(double_value_);
}
bool IsTheHole() const {
if (HasDoubleValue() && DoubleValueAsBits() == kHoleNanInt64) {
@@ -5125,10 +5122,6 @@ class HObjectAccess final {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
- static HObjectAccess ForLiteralsPointer() {
- return HObjectAccess(kInobject, JSFunction::kLiteralsOffset);
- }
-
static HObjectAccess ForNextFunctionLinkPointer() {
return HObjectAccess(kInobject, JSFunction::kNextFunctionLinkOffset);
}
diff --git a/deps/v8/src/crankshaft/hydrogen-load-elimination.cc b/deps/v8/src/crankshaft/hydrogen-load-elimination.cc
index 88963fc18b..99f4947a84 100644
--- a/deps/v8/src/crankshaft/hydrogen-load-elimination.cc
+++ b/deps/v8/src/crankshaft/hydrogen-load-elimination.cc
@@ -7,6 +7,7 @@
#include "src/crankshaft/hydrogen-alias-analysis.h"
#include "src/crankshaft/hydrogen-flow-engine.h"
#include "src/crankshaft/hydrogen-instructions.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc b/deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc
index 4e1dd689ee..2393b5a8a4 100644
--- a/deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc
+++ b/deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-mark-unreachable.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-osr.cc b/deps/v8/src/crankshaft/hydrogen-osr.cc
index 607bfbd85d..093f94b83f 100644
--- a/deps/v8/src/crankshaft/hydrogen-osr.cc
+++ b/deps/v8/src/crankshaft/hydrogen-osr.cc
@@ -5,6 +5,7 @@
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/hydrogen.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-range-analysis.cc b/deps/v8/src/crankshaft/hydrogen-range-analysis.cc
index a489e014eb..50592d32ca 100644
--- a/deps/v8/src/crankshaft/hydrogen-range-analysis.cc
+++ b/deps/v8/src/crankshaft/hydrogen-range-analysis.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-range-analysis.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-redundant-phi.cc b/deps/v8/src/crankshaft/hydrogen-redundant-phi.cc
index ef8b29159d..08644c874c 100644
--- a/deps/v8/src/crankshaft/hydrogen-redundant-phi.cc
+++ b/deps/v8/src/crankshaft/hydrogen-redundant-phi.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-redundant-phi.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-removable-simulates.cc b/deps/v8/src/crankshaft/hydrogen-removable-simulates.cc
index ceef7430eb..e68168cf9c 100644
--- a/deps/v8/src/crankshaft/hydrogen-removable-simulates.cc
+++ b/deps/v8/src/crankshaft/hydrogen-removable-simulates.cc
@@ -6,6 +6,7 @@
#include "src/crankshaft/hydrogen-flow-engine.h"
#include "src/crankshaft/hydrogen-instructions.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-representation-changes.cc b/deps/v8/src/crankshaft/hydrogen-representation-changes.cc
index 4d74df4952..5fd72618fa 100644
--- a/deps/v8/src/crankshaft/hydrogen-representation-changes.cc
+++ b/deps/v8/src/crankshaft/hydrogen-representation-changes.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-representation-changes.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-sce.cc b/deps/v8/src/crankshaft/hydrogen-sce.cc
index 91e91d2033..a08190de3e 100644
--- a/deps/v8/src/crankshaft/hydrogen-sce.cc
+++ b/deps/v8/src/crankshaft/hydrogen-sce.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-sce.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-store-elimination.cc b/deps/v8/src/crankshaft/hydrogen-store-elimination.cc
index 57c7880aa7..b081c21984 100644
--- a/deps/v8/src/crankshaft/hydrogen-store-elimination.cc
+++ b/deps/v8/src/crankshaft/hydrogen-store-elimination.cc
@@ -5,6 +5,7 @@
#include "src/crankshaft/hydrogen-store-elimination.h"
#include "src/crankshaft/hydrogen-instructions.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc b/deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc
index ac4a63f8f2..de31a616c1 100644
--- a/deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc
+++ b/deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-uint32-analysis.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index ea3401c90c..d55bb37c39 100644
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -5287,10 +5287,11 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return;
} else {
Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
+ FeedbackSlot slot = expr->VariableFeedbackSlot();
+ DCHECK(vector->IsLoadGlobalIC(slot));
HValue* vector_value = Add<HConstant>(vector);
- HValue* slot_value =
- Add<HConstant>(vector->GetIndex(expr->VariableFeedbackSlot()));
+ HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
Callable callable = CodeFactory::LoadGlobalICInOptimizedCode(
isolate(), ast_context()->typeof_mode());
HValue* stub = Add<HConstant>(callable.code());
@@ -5354,7 +5355,8 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
Callable callable = CodeFactory::FastCloneRegExp(isolate());
- HValue* values[] = {AddThisFunction(), Add<HConstant>(expr->literal_index()),
+ int index = FeedbackVector::GetIndex(expr->literal_slot());
+ HValue* values[] = {AddThisFunction(), Add<HConstant>(index),
Add<HConstant>(expr->pattern()),
Add<HConstant>(expr->flags())};
HConstant* stub_value = Add<HConstant>(callable.code());
@@ -5455,7 +5457,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Check whether to use fast or slow deep-copying for boilerplate.
int max_properties = kMaxFastLiteralProperties;
Handle<Object> literals_cell(
- closure->literals()->literal(expr->literal_index()), isolate());
+ closure->feedback_vector()->Get(expr->literal_slot()), isolate());
Handle<AllocationSite> site;
Handle<JSObject> boilerplate;
if (!literals_cell->IsUndefined(isolate())) {
@@ -5473,9 +5475,9 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
site_context.ExitScope(site, boilerplate);
} else {
NoObservableSideEffectsScope no_effects(this);
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
- int literal_index = expr->literal_index();
+ int literal_index = FeedbackVector::GetIndex(expr->literal_slot());
int flags = expr->ComputeFlags(true);
Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
@@ -5513,7 +5515,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Map> map = property->GetReceiverType();
Handle<String> name = key->AsPropertyName();
HValue* store;
- FeedbackVectorSlot slot = property->GetSlot();
+ FeedbackSlot slot = property->GetSlot();
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot, literal,
@@ -5527,6 +5529,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
store = BuildMonomorphicAccess(
&info, literal, checked_literal, value,
BailoutId::None(), BailoutId::None());
+ DCHECK_NOT_NULL(store);
} else {
CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot,
literal, name, value));
@@ -5574,10 +5577,9 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HInstruction* literal;
Handle<AllocationSite> site;
- Handle<LiteralsArray> literals(environment()->closure()->literals(),
- isolate());
- Handle<Object> literals_cell(literals->literal(expr->literal_index()),
- isolate());
+ Handle<FeedbackVector> vector(environment()->closure()->feedback_vector(),
+ isolate());
+ Handle<Object> literals_cell(vector->Get(expr->literal_slot()), isolate());
Handle<JSObject> boilerplate_object;
if (!literals_cell->IsUndefined(isolate())) {
DCHECK(literals_cell->IsAllocationSite());
@@ -5600,7 +5602,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
NoObservableSideEffectsScope no_effects(this);
Handle<ConstantElementsPair> constants =
expr->GetOrBuildConstantElements(isolate());
- int literal_index = expr->literal_index();
+ int literal_index = FeedbackVector::GetIndex(expr->literal_slot());
int flags = expr->ComputeFlags(true);
Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
@@ -5678,7 +5680,7 @@ HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
PropertyAccessInfo* info,
HValue* checked_object) {
- // See if this is a load for an immutable property
+ // Check if this is a load of an immutable or constant property.
if (checked_object->ActualValue()->IsConstant()) {
Handle<Object> object(
HConstant::cast(checked_object->ActualValue())->handle(isolate()));
@@ -5686,9 +5688,20 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
if (object->IsJSObject()) {
LookupIterator it(object, info->name(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
- Handle<Object> value = JSReceiver::GetDataProperty(&it);
- if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
- return New<HConstant>(value);
+ if (it.IsFound()) {
+ bool is_readonly_non_configurable =
+ it.IsReadOnly() && !it.IsConfigurable();
+ if (is_readonly_non_configurable ||
+ (FLAG_track_constant_fields && info->IsDataConstantField())) {
+ Handle<Object> value = JSReceiver::GetDataProperty(&it);
+ if (!is_readonly_non_configurable) {
+ DCHECK(!it.is_dictionary_holder());
+ // Add dependency on the map that introduced the field.
+ Handle<Map> field_owner_map = it.GetFieldOwnerMap();
+ top_info()->dependencies()->AssumeFieldOwner(field_owner_map);
+ }
+ return New<HConstant>(value);
+ }
}
}
}
@@ -5717,15 +5730,17 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
checked_object, checked_object, access, maps, info->field_type());
}
-
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
- PropertyAccessInfo* info,
- HValue* checked_object,
- HValue* value) {
+HValue* HOptimizedGraphBuilder::BuildStoreNamedField(PropertyAccessInfo* info,
+ HValue* checked_object,
+ HValue* value) {
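+ // Returns the emitted store, or nullptr when the store collapses into a
+ // check that the stored value matches the existing constant field.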
bool transition_to_field = info->IsTransition();
// TODO(verwaest): Move this logic into PropertyAccessInfo.
HObjectAccess field_access = info->access();
+ bool store_to_constant_field = FLAG_track_constant_fields &&
+ info->StoreMode() != INITIALIZING_STORE &&
+ info->IsDataConstantField();
+
HStoreNamedField *instr;
if (field_access.representation().IsDouble() &&
(!FLAG_unbox_double_fields || !field_access.IsInobject())) {
@@ -5751,23 +5766,57 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
// Already holds a HeapNumber; load the box and write its value field.
HInstruction* heap_number =
Add<HLoadNamedField>(checked_object, nullptr, heap_number_access);
- instr = New<HStoreNamedField>(heap_number,
- HObjectAccess::ForHeapNumberValue(),
- value, STORE_TO_INITIALIZED_ENTRY);
+
+ if (store_to_constant_field) {
+ // If the field is constant, check that the value we are going to store
+ // matches the current value.
+ HInstruction* current_value = Add<HLoadNamedField>(
+ heap_number, nullptr, HObjectAccess::ForHeapNumberValue());
+ IfBuilder value_checker(this);
+ value_checker.IfNot<HCompareNumericAndBranch>(current_value, value,
+ Token::EQ);
+ value_checker.ThenDeopt(DeoptimizeReason::kValueMismatch);
+ value_checker.End();
+ return nullptr;
+
+ } else {
+ instr = New<HStoreNamedField>(heap_number,
+ HObjectAccess::ForHeapNumberValue(),
+ value, STORE_TO_INITIALIZED_ENTRY);
+ }
}
} else {
- if (field_access.representation().IsHeapObject()) {
- BuildCheckHeapObject(value);
- }
+ if (store_to_constant_field) {
+ // If the field is constant, check that the value we are going to store
+ // matches the current value.
+ HInstruction* current_value = Add<HLoadNamedField>(
+ checked_object->ActualValue(), checked_object, field_access);
+
+ IfBuilder value_checker(this);
+ if (field_access.representation().IsDouble()) {
+ value_checker.IfNot<HCompareNumericAndBranch>(current_value, value,
+ Token::EQ);
+ } else {
+ value_checker.IfNot<HCompareObjectEqAndBranch>(current_value, value);
+ }
+ value_checker.ThenDeopt(DeoptimizeReason::kValueMismatch);
+ value_checker.End();
+ return nullptr;
- if (!info->field_maps()->is_empty()) {
- DCHECK(field_access.representation().IsHeapObject());
- value = Add<HCheckMaps>(value, info->field_maps());
- }
+ } else {
+ if (field_access.representation().IsHeapObject()) {
+ BuildCheckHeapObject(value);
+ }
- // This is a normal store.
- instr = New<HStoreNamedField>(checked_object->ActualValue(), field_access,
- value, info->StoreMode());
+ if (!info->field_maps()->is_empty()) {
+ DCHECK(field_access.representation().IsHeapObject());
+ value = Add<HCheckMaps>(value, info->field_maps());
+ }
+
+ // This is a normal store.
+ instr = New<HStoreNamedField>(checked_object->ActualValue(), field_access,
+ value, info->StoreMode());
+ }
}
if (transition_to_field) {
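To make the new store path concrete, here is a minimal, self-contained C++ sketch of what BuildStoreNamedField now emits for a tracked-constant field (all types here are stand-ins, not V8 API; DeoptException models DeoptimizeReason::kValueMismatch): rather than writing the field, the optimized code checks that the incoming value equals the stored one and bails out otherwise.

#include <cstdio>

// Stand-in model, not V8 API: a store to a tracked-constant field becomes
// an equality guard plus deopt; no store instruction is emitted (note the
// early "return nullptr" paths in the hunk above).
struct DeoptException {};  // models DeoptimizeReason::kValueMismatch

struct Object {
  double constant_field;  // field currently tracked as constant
};

void StoreToConstantField(Object& o, double value) {
  if (o.constant_field != value) throw DeoptException{};  // value mismatch
  // Guard passed: the field already holds this value; nothing to write.
}

int main() {
  Object o{1.5};
  StoreToConstantField(o, 1.5);    // same value: guard passes
  try {
    StoreToConstantField(o, 2.0);  // different value: "deopt"
  } catch (const DeoptException&) {
    std::puts("deopt: value mismatch");
  }
  return 0;
}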
@@ -6151,9 +6200,8 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
}
}
-
void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
- PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+ PropertyAccessType access_type, Expression* expr, FeedbackSlot slot,
BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
SmallMapList* maps, Handle<Name> name) {
// Something did not match; must use a polymorphic load.
@@ -6351,8 +6399,8 @@ static bool AreStringTypes(SmallMapList* maps) {
}
void HOptimizedGraphBuilder::BuildStore(Expression* expr, Property* prop,
- FeedbackVectorSlot slot,
- BailoutId ast_id, BailoutId return_id,
+ FeedbackSlot slot, BailoutId ast_id,
+ BailoutId return_id,
bool is_uninitialized) {
if (!prop->key()->IsPropertyName()) {
// Keyed store.
@@ -6471,8 +6519,10 @@ HInstruction* HOptimizedGraphBuilder::InlineGlobalPropertyStore(
// Because not every expression has a position and there is no common
// superclass of Assignment and CountOperation, we cannot just pass the
// owning expression instead of position and ast_id separately.
-void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
- Variable* var, HValue* value, FeedbackVectorSlot slot, BailoutId ast_id) {
+void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
+ HValue* value,
+ FeedbackSlot slot,
+ BailoutId ast_id) {
Handle<JSGlobalObject> global(current_info()->global_object());
// Lookup in script contexts.
@@ -6523,6 +6573,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HValue* name = Add<HConstant>(var->name());
HValue* vector_value = Add<HConstant>(vector);
HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+ DCHECK_EQ(vector->GetLanguageMode(slot), function_language_mode());
Callable callable = CodeFactory::StoreICInOptimizedCode(
isolate(), function_language_mode());
HValue* stub = Add<HConstant>(callable.code());
@@ -6818,9 +6869,8 @@ HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* string) {
HObjectAccess::ForStringLength());
}
-
HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
- PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+ PropertyAccessType access_type, Expression* expr, FeedbackSlot slot,
HValue* object, Handle<Name> name, HValue* value, bool is_uninitialized) {
if (is_uninitialized) {
Add<HDeoptimize>(
@@ -6836,6 +6886,7 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
if (access_type == LOAD) {
HValue* values[] = {object, key, slot_value, vector_value};
if (!expr->AsProperty()->key()->IsPropertyName()) {
+ DCHECK(vector->IsKeyedLoadIC(slot));
// It's possible that a keyed load of a constant string was converted
// to a named load. Here, at the last minute, we need to make sure to
// use a generic Keyed Load if we are using the type vector, because
@@ -6847,6 +6898,7 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
callable.descriptor(), ArrayVector(values));
return result;
}
+ DCHECK(vector->IsLoadIC(slot));
Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
HValue* stub = Add<HConstant>(callable.code());
HCallWithDescriptor* result = New<HCallWithDescriptor>(
@@ -6855,11 +6907,12 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
} else {
HValue* values[] = {object, key, value, slot_value, vector_value};
- if (vector->GetKind(slot) == FeedbackVectorSlotKind::KEYED_STORE_IC) {
+ if (vector->IsKeyedStoreIC(slot)) {
// It's possible that a keyed store of a constant string was converted
// to a named store. Here, at the last minute, we need to make sure to
// use a generic Keyed Store if we are using the type vector, because
// it has to share information with full code.
+ DCHECK_EQ(vector->GetLanguageMode(slot), function_language_mode());
Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), function_language_mode());
HValue* stub = Add<HConstant>(callable.code());
@@ -6868,18 +6921,27 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
callable.descriptor(), ArrayVector(values));
return result;
}
- Callable callable = CodeFactory::StoreICInOptimizedCode(
- isolate(), function_language_mode());
- HValue* stub = Add<HConstant>(callable.code());
- HCallWithDescriptor* result = New<HCallWithDescriptor>(
- Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
+ HCallWithDescriptor* result;
+ if (vector->IsStoreOwnIC(slot)) {
+ Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ result = New<HCallWithDescriptor>(
+ Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
+ } else {
+ DCHECK(vector->IsStoreIC(slot));
+ DCHECK_EQ(vector->GetLanguageMode(slot), function_language_mode());
+ Callable callable = CodeFactory::StoreICInOptimizedCode(
+ isolate(), function_language_mode());
+ HValue* stub = Add<HConstant>(callable.code());
+ result = New<HCallWithDescriptor>(
+ Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
+ }
return result;
}
}
-
HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
- PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+ PropertyAccessType access_type, Expression* expr, FeedbackSlot slot,
HValue* object, HValue* key, HValue* value) {
Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
HValue* vector_value = Add<HConstant>(vector);
@@ -7074,9 +7136,8 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
return instr;
}
-
HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
- Expression* expr, FeedbackVectorSlot slot, HValue* object, HValue* key,
+ Expression* expr, FeedbackSlot slot, HValue* object, HValue* key,
HValue* val, SmallMapList* maps, PropertyAccessType access_type,
KeyedAccessStoreMode store_mode, bool* has_side_effects) {
*has_side_effects = false;
@@ -7211,9 +7272,9 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
}
HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
- HValue* obj, HValue* key, HValue* val, Expression* expr,
- FeedbackVectorSlot slot, BailoutId ast_id, BailoutId return_id,
- PropertyAccessType access_type, bool* has_side_effects) {
+ HValue* obj, HValue* key, HValue* val, Expression* expr, FeedbackSlot slot,
+ BailoutId ast_id, BailoutId return_id, PropertyAccessType access_type,
+ bool* has_side_effects) {
// A keyed name access with type feedback may contain the name.
Handle<FeedbackVector> vector = handle(current_feedback_vector(), isolate());
HValue* expected_key = key;
@@ -7439,8 +7500,8 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
HValue* HOptimizedGraphBuilder::BuildNamedAccess(
PropertyAccessType access, BailoutId ast_id, BailoutId return_id,
- Expression* expr, FeedbackVectorSlot slot, HValue* object,
- Handle<Name> name, HValue* value, bool is_uninitialized) {
+ Expression* expr, FeedbackSlot slot, HValue* object, Handle<Name> name,
+ HValue* value, bool is_uninitialized) {
SmallMapList* maps;
ComputeReceiverTypes(expr, object, &maps, this);
DCHECK(maps != NULL);
@@ -7667,7 +7728,7 @@ HInstruction* HOptimizedGraphBuilder::NewCallFunction(
HInstruction* HOptimizedGraphBuilder::NewCallFunctionViaIC(
HValue* function, int argument_count, TailCallMode syntactic_tail_call_mode,
ConvertReceiverMode convert_mode, TailCallMode tail_call_mode,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
if (syntactic_tail_call_mode == TailCallMode::kAllow) {
BuildEnsureCallable(function);
} else {
@@ -7680,8 +7741,8 @@ HInstruction* HOptimizedGraphBuilder::NewCallFunctionViaIC(
HValue* vector_val = Add<HConstant>(vector);
HValue* op_vals[] = {function, arity_val, index_val, vector_val};
- Callable callable = CodeFactory::CallICInOptimizedCode(
- isolate(), convert_mode, tail_call_mode);
+ Callable callable =
+ CodeFactory::CallIC(isolate(), convert_mode, tail_call_mode);
HConstant* stub = Add<HConstant>(callable.code());
return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
@@ -8034,12 +8095,12 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
// Use the same AstValueFactory for creating strings in the sub-compilation
// step, but don't transfer ownership to target_info.
Handle<SharedFunctionInfo> target_shared(target->shared());
- ParseInfo parse_info(zone(), target_shared);
+ ParseInfo parse_info(target_shared, top_info()->parse_info()->zone_shared());
parse_info.set_ast_value_factory(
top_info()->parse_info()->ast_value_factory());
parse_info.set_ast_value_factory_owned(false);
- CompilationInfo target_info(&parse_info, target);
+ CompilationInfo target_info(parse_info.zone(), &parse_info, target);
if (inlining_kind != CONSTRUCT_CALL_RETURN &&
IsClassConstructor(target_shared->kind())) {
@@ -10400,7 +10461,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(CountOperation* expr) {
}
void HOptimizedGraphBuilder::BuildStoreForEffect(
- Expression* expr, Property* prop, FeedbackVectorSlot slot, BailoutId ast_id,
+ Expression* expr, Property* prop, FeedbackSlot slot, BailoutId ast_id,
BailoutId return_id, HValue* object, HValue* key, HValue* value) {
EffectContext for_effect(this);
Push(object);
@@ -11133,11 +11194,9 @@ bool IsLiteralCompareStrict(Isolate* isolate, HValue* left, Token::Value op,
return op == Token::EQ_STRICT &&
((left->IsConstant() &&
!HConstant::cast(left)->handle(isolate)->IsNumber() &&
- !HConstant::cast(left)->handle(isolate)->IsSimd128Value() &&
!HConstant::cast(left)->handle(isolate)->IsString()) ||
(right->IsConstant() &&
!HConstant::cast(right)->handle(isolate)->IsNumber() &&
- !HConstant::cast(right)->handle(isolate)->IsSimd128Value() &&
!HConstant::cast(right)->handle(isolate)->IsString()));
}
@@ -11846,10 +11905,11 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals_.Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
globals_.Add(isolate()->factory()->undefined_value(), zone());
+ globals_.Add(isolate()->factory()->undefined_value(), zone());
return;
}
case VariableLocation::PARAMETER:
@@ -11885,9 +11945,15 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
globals_.Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+ // We need the slot where the literals array lives, too.
+ slot = declaration->fun()->LiteralFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
declaration->fun(), current_info()->script(), top_info());
// Check for stack-overflow exception.
@@ -12117,32 +12183,6 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
return ast_context()->ReturnInstruction(result, call->id());
}
-// Support for direct calls from JavaScript to native RegExp code.
-void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
- DCHECK_EQ(4, call->arguments()->length());
- CHECK_ALIVE(VisitExpressions(call->arguments()));
- Callable callable = CodeFactory::RegExpExec(isolate());
- HValue* last_match_info = Pop();
- HValue* index = Pop();
- HValue* subject = Pop();
- HValue* regexp_object = Pop();
- HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {regexp_object, subject, index, last_match_info};
- HInstruction* result = New<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for number to string.
-void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* number = Pop();
- HValue* result = BuildNumberToString(number, AstType::Any());
- return ast_context()->ReturnValue(result);
-}
-
// Fast support for calls.
void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index 97cb9f2d80..2ce6454f13 100644
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -12,10 +12,13 @@
#include "src/bailout-reason.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
+#include "src/counters.h"
#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/hydrogen-instructions.h"
#include "src/globals.h"
#include "src/parsing/parse-info.h"
+#include "src/string-stream.h"
+#include "src/transitions.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -37,9 +40,8 @@ class HCompilationJob final : public CompilationJob {
public:
explicit HCompilationJob(Handle<JSFunction> function)
: CompilationJob(function->GetIsolate(), &info_, "Crankshaft"),
- zone_(function->GetIsolate()->allocator(), ZONE_NAME),
- parse_info_(&zone_, handle(function->shared())),
- info_(&parse_info_, function),
+ parse_info_(handle(function->shared())),
+ info_(parse_info_.zone(), &parse_info_, function),
graph_(nullptr),
chunk_(nullptr) {}
@@ -49,7 +51,6 @@ class HCompilationJob final : public CompilationJob {
virtual Status FinalizeJobImpl();
private:
- Zone zone_;
ParseInfo parse_info_;
CompilationInfo info_;
HGraph* graph_;
@@ -2167,8 +2168,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
F(DebugBreakInOptimizedCode) \
F(StringCharCodeAt) \
F(SubString) \
- F(RegExpExec) \
- F(NumberToString) \
F(DebugIsActive) \
/* Typed Arrays */ \
F(TypedArrayInitialize) \
@@ -2387,15 +2386,16 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
void HandleGlobalVariableAssignment(Variable* var, HValue* value,
- FeedbackVectorSlot slot,
- BailoutId ast_id);
+ FeedbackSlot slot, BailoutId ast_id);
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicNamedFieldAccess(
- PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
- BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
- SmallMapList* types, Handle<Name> name);
+ void HandlePolymorphicNamedFieldAccess(PropertyAccessType access_type,
+ Expression* expr, FeedbackSlot slot,
+ BailoutId ast_id, BailoutId return_id,
+ HValue* object, HValue* value,
+ SmallMapList* types,
+ Handle<Name> name);
HValue* BuildAllocateExternalElements(
ExternalArrayType array_type,
@@ -2525,6 +2525,12 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
bool IsFound() const { return lookup_type_ != NOT_FOUND; }
bool IsProperty() const { return IsFound() && !IsTransition(); }
bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
+ // TODO(ishell): rename to IsDataConstant() once constant field tracking
+ // is done.
+ bool IsDataConstantField() const {
+ return lookup_type_ == DESCRIPTOR_TYPE && details_.kind() == kData &&
+ details_.location() == kField && details_.constness() == kConst;
+ }
bool IsData() const {
return lookup_type_ == DESCRIPTOR_TYPE && details_.kind() == kData &&
details_.location() == kField;
@@ -2641,9 +2647,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
HValue* BuildNamedAccess(PropertyAccessType access, BailoutId ast_id,
BailoutId return_id, Expression* expr,
- FeedbackVectorSlot slot, HValue* object,
- Handle<Name> name, HValue* value,
- bool is_uninitialized = false);
+ FeedbackSlot slot, HValue* object, Handle<Name> name,
+ HValue* value, bool is_uninitialized = false);
void HandlePolymorphicCallNamed(Call* expr,
HValue* receiver,
@@ -2677,7 +2682,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
PushBeforeSimulateBehavior push_sim_result);
HInstruction* BuildIncrement(CountOperation* expr);
HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
- Expression* expr, FeedbackVectorSlot slot,
+ Expression* expr, FeedbackSlot slot,
HValue* object, HValue* key, HValue* value);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
@@ -2695,19 +2700,21 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
PropertyAccessType access_type,
KeyedAccessStoreMode store_mode);
- HValue* HandlePolymorphicElementAccess(
- Expression* expr, FeedbackVectorSlot slot, HValue* object, HValue* key,
- HValue* val, SmallMapList* maps, PropertyAccessType access_type,
- KeyedAccessStoreMode store_mode, bool* has_side_effects);
+ HValue* HandlePolymorphicElementAccess(Expression* expr, FeedbackSlot slot,
+ HValue* object, HValue* key,
+ HValue* val, SmallMapList* maps,
+ PropertyAccessType access_type,
+ KeyedAccessStoreMode store_mode,
+ bool* has_side_effects);
HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val,
- Expression* expr, FeedbackVectorSlot slot,
+ Expression* expr, FeedbackSlot slot,
BailoutId ast_id, BailoutId return_id,
PropertyAccessType access_type,
bool* has_side_effects);
HInstruction* BuildNamedGeneric(PropertyAccessType access, Expression* expr,
- FeedbackVectorSlot slot, HValue* object,
+ FeedbackSlot slot, HValue* object,
Handle<Name> name, HValue* value,
bool is_uninitialized = false);
@@ -2720,19 +2727,18 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
HValue* key);
void BuildStoreForEffect(Expression* expression, Property* prop,
- FeedbackVectorSlot slot, BailoutId ast_id,
+ FeedbackSlot slot, BailoutId ast_id,
BailoutId return_id, HValue* object, HValue* key,
HValue* value);
- void BuildStore(Expression* expression, Property* prop,
- FeedbackVectorSlot slot, BailoutId ast_id,
- BailoutId return_id, bool is_uninitialized = false);
+ void BuildStore(Expression* expression, Property* prop, FeedbackSlot slot,
+ BailoutId ast_id, BailoutId return_id,
+ bool is_uninitialized = false);
HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
HValue* checked_object);
- HInstruction* BuildStoreNamedField(PropertyAccessInfo* info,
- HValue* checked_object,
- HValue* value);
+ HValue* BuildStoreNamedField(PropertyAccessInfo* info, HValue* checked_object,
+ HValue* value);
HValue* BuildContextChainWalk(Variable* var);
@@ -2778,7 +2784,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
TailCallMode syntactic_tail_call_mode,
ConvertReceiverMode convert_mode,
TailCallMode tail_call_mode,
- FeedbackVectorSlot slot);
+ FeedbackSlot slot);
HInstruction* NewCallConstantFunction(Handle<JSFunction> target,
int argument_count,
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 978ae2f1c2..d5b87492c5 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -309,7 +309,7 @@ bool LCodeGen::GenerateJumpTable() {
// building, install a special marker there instead.
DCHECK(info()->IsStub());
__ mov(MemOperand(esp, 2 * kPointerSize),
- Immediate(Smi::FromInt(StackFrame::STUB)));
+ Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
/* stack layout
3: old ebp
@@ -346,7 +346,7 @@ bool LCodeGen::GenerateDeferredCode() {
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ push(ebp); // Caller's frame pointer.
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ __ push(Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
__ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
@@ -1927,12 +1927,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected & ToBooleanHint::kSimdValue) {
- // SIMD value -> true.
- __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
- __ j(equal, instr->TrueLabel(chunk_));
- }
-
if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@@ -2696,7 +2690,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ mov(result,
Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(Operand(result),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
@@ -3398,7 +3392,7 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &no_arguments_adaptor, Label::kNear);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -4548,6 +4542,15 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Label deopt, done;
+ // If the map is not deprecated the migration attempt does not make sense.
+ __ push(object);
+ __ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ __ test(FieldOperand(object, Map::kBitField3Offset),
+ Immediate(Map::Deprecated::kMask));
+ __ pop(object);
+ __ j(zero, &deopt);
+
{
PushSafepointRegistersScope scope(this);
__ push(object);
@@ -4558,7 +4561,12 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
+ __ j(not_zero, &done);
+
+ __ bind(&deopt);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kInstanceMigrationFailed);
+
+ __ bind(&done);
}
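A rough, self-contained C++ model of the control flow this hunk (and the matching hunks for the other architectures below) generates, using stand-in types rather than V8's: migration is attempted only when the map's Deprecated bit is set, and both the not-deprecated fast-out and a failed runtime call end in deoptimization.

#include <stdexcept>

// Stand-in types, not V8 API.
struct Map { bool deprecated; };
struct HeapObject { Map* map; };

// Placeholder for Runtime::kTryMigrateInstance; a null result models the
// Smi (failure) return that the generated code tests for.
HeapObject* TryMigrateInstance(HeapObject* object) { return object; }

HeapObject* DeferredInstanceMigration(HeapObject* object) {
  // New fast-out: a non-deprecated map cannot migrate, so deoptimize
  // immediately instead of making a pointless runtime call.
  if (!object->map->deprecated)
    throw std::runtime_error("deopt: InstanceMigrationFailed");
  HeapObject* result = TryMigrateInstance(object);
  if (result == nullptr)
    throw std::runtime_error("deopt: InstanceMigrationFailed");
  return result;
}

int main() {
  Map deprecated_map{true};
  HeapObject object{&deprecated_map};
  DeferredInstanceMigration(&object);  // succeeds in this toy model
  return 0;
}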
@@ -4899,18 +4907,6 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
final_branch_condition = zero;
-
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(type_name, factory()->type##_string())) { \
- __ JumpIfSmi(input, false_label, false_distance); \
- __ cmp(FieldOperand(input, HeapObject::kMapOffset), \
- factory()->type##_map()); \
- final_branch_condition = equal;
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
-
} else {
__ jmp(false_label, false_distance);
}
diff --git a/deps/v8/src/crankshaft/lithium-allocator.cc b/deps/v8/src/crankshaft/lithium-allocator.cc
index aa4459b23a..201c6062a8 100644
--- a/deps/v8/src/crankshaft/lithium-allocator.cc
+++ b/deps/v8/src/crankshaft/lithium-allocator.cc
@@ -5,8 +5,9 @@
#include "src/crankshaft/lithium-allocator.h"
#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium-inl.h"
#include "src/crankshaft/lithium-allocator-inl.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/string-stream.h"
diff --git a/deps/v8/src/crankshaft/lithium-codegen.cc b/deps/v8/src/crankshaft/lithium-codegen.cc
index 2d165601d7..9569660357 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.cc
+++ b/deps/v8/src/crankshaft/lithium-codegen.cc
@@ -6,6 +6,8 @@
#include <sstream>
+#include "src/objects-inl.h"
+
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
@@ -237,7 +239,8 @@ void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
: info()->shared_info());
- translation->BeginConstructStubFrame(shared_id, translation_size);
+ translation->BeginConstructStubFrame(BailoutId::ConstructStubInvoke(),
+ shared_id, translation_size);
if (info()->closure().is_identical_to(environment->closure())) {
translation->StoreJSFrameFunction();
} else {
diff --git a/deps/v8/src/crankshaft/lithium.cc b/deps/v8/src/crankshaft/lithium.cc
index 94d60418fd..5f0e9e386d 100644
--- a/deps/v8/src/crankshaft/lithium.cc
+++ b/deps/v8/src/crankshaft/lithium.cc
@@ -6,6 +6,7 @@
#include "src/ast/scopes.h"
#include "src/codegen.h"
+#include "src/objects-inl.h"
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index 36019cc94d..cd6e45af85 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -303,7 +303,7 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ li(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
@@ -362,7 +362,7 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ li(at, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ push(at);
DCHECK(info()->IsStub());
}
@@ -2025,14 +2025,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
- if (expected & ToBooleanHint::kSimdValue) {
- // SIMD value -> true.
- const Register scratch = scratch1();
- __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(instr->TrueLabel(chunk_), eq, scratch,
- Operand(SIMD128_VALUE_TYPE));
- }
-
if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
@@ -2874,7 +2866,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(result,
MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Xor(temp, result,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
@@ -3486,7 +3479,7 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
__ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
__ Branch(&no_arguments_adaptor, ne, scratch3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
@@ -4768,6 +4761,13 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Label deopt, done;
+ // If the map is not deprecated the migration attempt does not make sense.
+ __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ lw(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
+ __ And(at, scratch0(), Operand(Map::Deprecated::kMask));
+ __ Branch(&deopt, eq, at, Operand(zero_reg));
+
{
PushSafepointRegistersScope scope(this);
__ push(object);
@@ -4778,8 +4778,15 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at,
+ __ Branch(&done, ne, at, Operand(zero_reg));
+
+ __ bind(&deopt);
+ // In case of the "al" condition the operands are not used, so just pass
+ // zero_reg there.
+ DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, zero_reg,
Operand(zero_reg));
+
+ __ bind(&done);
}
@@ -5156,19 +5163,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
*cmp2 = Operand(zero_reg);
final_branch_condition = eq;
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(type_name, factory->type##_string())) { \
- __ JumpIfSmi(input, false_label); \
- __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); \
- __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
- *cmp1 = input; \
- *cmp2 = Operand(at); \
- final_branch_condition = eq;
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
-
} else {
*cmp1 = at;
*cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 350cede90b..d32052c5e7 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -279,7 +279,7 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ li(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
@@ -347,7 +347,7 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ li(at, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ push(at);
DCHECK(info()->IsStub());
}
@@ -2146,14 +2146,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
- if (expected & ToBooleanHint::kSimdValue) {
- // SIMD value -> true.
- const Register scratch = scratch1();
- __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(instr->TrueLabel(chunk_), eq, scratch,
- Operand(SIMD128_VALUE_TYPE));
- }
-
if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
@@ -3056,7 +3048,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ld(result,
MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Xor(temp, result,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
@@ -3692,7 +3685,7 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
__ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
__ Branch(&no_arguments_adaptor, ne, scratch3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
@@ -4959,6 +4952,13 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Label deopt, done;
+ // If the map is not deprecated the migration attempt does not make sense.
+ __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ lwu(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
+ __ And(at, scratch0(), Operand(Map::Deprecated::kMask));
+ __ Branch(&deopt, eq, at, Operand(zero_reg));
+
{
PushSafepointRegistersScope scope(this);
__ push(object);
@@ -4969,8 +4969,15 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at,
+ __ Branch(&done, ne, at, Operand(zero_reg));
+
+ __ bind(&deopt);
+ // In case of the "al" condition the operands are not used, so just pass
+ // zero_reg there.
+ DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, zero_reg,
Operand(zero_reg));
+
+ __ bind(&done);
}
@@ -5349,20 +5356,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
*cmp2 = Operand(zero_reg);
final_branch_condition = eq;
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(type_name, factory->type##_string())) { \
- __ JumpIfSmi(input, false_label); \
- __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset)); \
- __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
- *cmp1 = input; \
- *cmp2 = Operand(at); \
- final_branch_condition = eq;
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
-
-
} else {
*cmp1 = at;
*cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 1450a714c4..f930611b14 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -287,7 +287,7 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
+ __ mov(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
@@ -356,7 +356,7 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+ __ mov(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ push(ip);
DCHECK(info()->IsStub());
}
@@ -1707,12 +1707,15 @@ void LCodeGen::DoSubI(LSubI* instr) {
} else {
__ sub(result, left, EmitLoadRegister(right, ip));
}
-#if V8_TARGET_ARCH_PPC64
if (can_overflow) {
+#if V8_TARGET_ARCH_PPC64
__ TestIfInt32(result, r0);
+#else
+ __ TestIfInt32(scratch0(), result, r0);
+#endif
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
-#endif
+
} else {
if (right->IsConstantOperand()) {
__ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
@@ -2203,13 +2206,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ beq(instr->TrueLabel(chunk_));
}
- if (expected & ToBooleanHint::kSimdValue) {
- // SIMD value -> true.
- Label not_simd;
- __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
- __ beq(instr->TrueLabel(chunk_));
- }
-
if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@@ -3133,7 +3129,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ LoadP(
result,
MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ cmpi(result,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
@@ -3771,7 +3768,8 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
__ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ LoadP(scratch3,
MemOperand(scratch2, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ cmpi(scratch3,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ bne(&no_arguments_adaptor);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -5064,6 +5062,13 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
Register temp = ToRegister(instr->temp());
+ Label deopt, done;
+ // If the map is not deprecated the migration attempt does not make sense.
+ __ LoadP(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ lwz(temp, FieldMemOperand(temp, Map::kBitField3Offset));
+ __ TestBitMask(temp, Map::Deprecated::kMask, r0);
+ __ beq(&deopt, cr0);
+
{
PushSafepointRegistersScope scope(this);
__ push(object);
@@ -5074,7 +5079,13 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r3, temp);
}
__ TestIfSmi(temp, r0);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
+ __ bne(&done, cr0);
+
+ __ bind(&deopt);
+ // In case of the "al" condition the operand is not used, so just pass cr0 there.
+ DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
+
+ __ bind(&done);
}
@@ -5426,17 +5437,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
__ cmpi(r0, Operand::Zero());
final_branch_condition = eq;
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(type_name, factory->type##_string())) { \
- __ JumpIfSmi(input, false_label); \
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
- __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
- final_branch_condition = eq;
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
-
} else {
__ b(false_label);
}
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
index 7bbc917bc6..02c6b6f7fa 100644
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -275,7 +275,8 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
+ __ Load(scratch0(),
+ Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
@@ -344,7 +345,7 @@ bool LCodeGen::GenerateJumpTable() {
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
- __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+ __ Load(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ push(ip);
DCHECK(info()->IsStub());
}
@@ -1698,10 +1699,17 @@ void LCodeGen::DoSubI(LSubI* instr) {
#endif
if (right->IsConstantOperand()) {
- if (!isInteger || !checkOverflow)
+ if (!isInteger || !checkOverflow) {
__ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
- else
- __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
+ } else {
+ // -(MinInt) will overflow
+ if (ToInteger32(LConstantOperand::cast(right)) == kMinInt) {
+ __ Load(scratch0(), ToOperand(right));
+ __ Sub32(ToRegister(result), ToRegister(left), scratch0());
+ } else {
+ __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
+ }
+ }
} else if (right->IsRegister()) {
if (!isInteger)
__ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
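The kMinInt special case above exists because negating the most negative 32-bit integer overflows, presumably relevant here because a subtract-immediate would otherwise be lowered via the negated constant; loading the constant into scratch0() and doing a register-register Sub32 sidesteps the negation. A tiny standalone check of the arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t min = INT32_MIN;  // kMinInt, -2147483648
  // Negate in 64-bit to see the mathematically correct value; in 32-bit
  // arithmetic the negation would wrap back to INT32_MIN itself.
  int64_t negated = -static_cast<int64_t>(min);
  std::printf("-(kMinInt) = %lld, but INT32_MAX = %d\n",
              static_cast<long long>(negated), INT32_MAX);
  return 0;
}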
@@ -2202,13 +2210,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ beq(instr->TrueLabel(chunk_));
}
- if (expected & ToBooleanHint::kSimdValue) {
- // SIMD value -> true.
- Label not_simd;
- __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
- __ beq(instr->TrueLabel(chunk_));
- }
-
if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@@ -3095,8 +3096,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ LoadP(
result,
MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ CmpP(result, r0);
+ __ CmpP(result,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
@@ -3673,7 +3674,8 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
__ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ LoadP(scratch3,
MemOperand(scratch2, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ CmpP(scratch3,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ bne(&no_arguments_adaptor);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -5005,6 +5007,13 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
Register temp = ToRegister(instr->temp());
+ Label deopt, done;
+ // If the map is not deprecated the migration attempt does not make sense.
+ __ LoadP(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ LoadlW(temp, FieldMemOperand(temp, Map::kBitField3Offset));
+ __ TestBitMask(temp, Map::Deprecated::kMask, r0);
+ __ beq(&deopt);
+
{
PushSafepointRegistersScope scope(this);
__ push(object);
@@ -5015,7 +5024,13 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r2, temp);
}
__ TestIfSmi(temp);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
+ __ bne(&done);
+
+ __ bind(&deopt);
+ // In case of the "al" condition the operand is not used, so just pass cr0 there.
+ DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
+
+ __ bind(&done);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
@@ -5364,17 +5379,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
__ CmpP(r0, Operand::Zero());
final_branch_condition = eq;
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(type_name, factory->type##_string())) { \
- __ JumpIfSmi(input, false_label); \
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
- __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
- final_branch_condition = eq;
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
-
} else {
__ b(false_label);
}
diff --git a/deps/v8/src/crankshaft/typing.cc b/deps/v8/src/crankshaft/typing.cc
index bbf629d45e..9713e4fd6f 100644
--- a/deps/v8/src/crankshaft/typing.cc
+++ b/deps/v8/src/crankshaft/typing.cc
@@ -405,7 +405,7 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
prop->key()->AsLiteral()->value()->IsInternalizedString() &&
prop->emit_store()) {
// Record type feed back for the property.
- FeedbackVectorSlot slot = prop->GetSlot();
+ FeedbackSlot slot = prop->GetSlot();
SmallMapList maps;
oracle()->CollectReceiverTypes(slot, &maps);
prop->set_receiver_type(maps.length() == 1 ? maps.at(0)
@@ -435,7 +435,7 @@ void AstTyper::VisitAssignment(Assignment* expr) {
// Collect type feedback.
Property* prop = expr->target()->AsProperty();
if (prop != NULL) {
- FeedbackVectorSlot slot = expr->AssignmentSlot();
+ FeedbackSlot slot = expr->AssignmentSlot();
expr->set_is_uninitialized(oracle()->StoreIsUninitialized(slot));
if (!expr->IsUninitialized()) {
SmallMapList* receiver_types = expr->GetReceiverTypes();
@@ -486,7 +486,7 @@ void AstTyper::VisitThrow(Throw* expr) {
void AstTyper::VisitProperty(Property* expr) {
// Collect type feedback.
- FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
+ FeedbackSlot slot = expr->PropertyFeedbackSlot();
expr->set_inline_cache_state(oracle()->LoadInlineCacheState(slot));
if (!expr->IsUninitialized()) {
@@ -515,7 +515,7 @@ void AstTyper::VisitProperty(Property* expr) {
void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
RECURSE(Visit(expr->expression()));
- FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
+ FeedbackSlot slot = expr->CallFeedbackICSlot();
bool is_uninitialized = oracle()->CallIsUninitialized(slot);
if (!expr->expression()->IsProperty() && oracle()->CallIsMonomorphic(slot)) {
expr->set_target(oracle()->GetCallTarget(slot));
@@ -541,8 +541,7 @@ void AstTyper::VisitCall(Call* expr) {
void AstTyper::VisitCallNew(CallNew* expr) {
// Collect type feedback.
- FeedbackVectorSlot allocation_site_feedback_slot =
- expr->CallNewFeedbackSlot();
+ FeedbackSlot allocation_site_feedback_slot = expr->CallNewFeedbackSlot();
expr->set_allocation_site(
oracle()->GetCallNewAllocationSite(allocation_site_feedback_slot));
bool monomorphic =
@@ -602,7 +601,7 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
void AstTyper::VisitCountOperation(CountOperation* expr) {
// Collect type feedback.
- FeedbackVectorSlot slot = expr->CountSlot();
+ FeedbackSlot slot = expr->CountSlot();
KeyedAccessStoreMode store_mode;
IcCheckType key_type;
oracle()->GetStoreModeAndKeyType(slot, &store_mode, &key_type);
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index f09af7136e..65816a1b69 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -13,6 +13,7 @@
#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -337,7 +338,8 @@ bool LCodeGen::GenerateJumpTable() {
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
- __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
+ __ movp(MemOperand(rsp, 2 * kPointerSize),
+ Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
/* stack layout
3: old rbp
@@ -375,7 +377,7 @@ bool LCodeGen::GenerateDeferredCode() {
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ pushq(rbp); // Caller's frame pointer.
- __ Push(Smi::FromInt(StackFrame::STUB));
+ __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
__ leap(rbp, Operand(rsp, TypedFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
@@ -2065,12 +2067,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected & ToBooleanHint::kSimdValue) {
- // SIMD value -> true.
- __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
- __ j(equal, instr->TrueLabel(chunk_));
- }
-
if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@@ -2887,8 +2883,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// Check for arguments adapter frame.
Label done, adapted;
__ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ cmpp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
+ Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
@@ -3563,8 +3559,8 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ cmpp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
+ Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &no_arguments_adaptor, Label::kNear);
// Drop current frame and load arguments count from arguments adaptor frame.
@@ -4817,9 +4813,19 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Label deopt, done;
+ // If the map is not deprecated the migration attempt does not make sense.
+ __ Push(object);
+ __ movp(object, FieldOperand(object, HeapObject::kMapOffset));
+ __ testl(FieldOperand(object, Map::kBitField3Offset),
+ Immediate(Map::Deprecated::kMask));
+ __ Pop(object);
+ __ j(zero, &deopt);
+
{
PushSafepointRegistersScope scope(this);
__ Push(object);
+
__ Set(rsi, 0);
__ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
@@ -4827,7 +4833,12 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ testp(rax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
+ __ j(not_zero, &done);
+
+ __ bind(&deopt);
+ DeoptimizeIf(always, instr, DeoptimizeReason::kInstanceMigrationFailed);
+
+ __ bind(&done);
}
@@ -5180,17 +5191,6 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
final_branch_condition = zero;
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(type_name, factory->type##_string())) { \
- __ JumpIfSmi(input, false_label, false_distance); \
- __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset), \
- Heap::k##Type##MapRootIndex); \
- final_branch_condition = equal;
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
-
} else {
__ jmp(false_label, false_distance);
}
diff --git a/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
index 94dffb333a..38b7d4525a 100644
--- a/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
@@ -7,6 +7,7 @@
#include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
#include "src/crankshaft/x64/lithium-codegen-x64.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.cc b/deps/v8/src/crankshaft/x64/lithium-x64.cc
index bc9040b94c..d0671e9d41 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.cc
@@ -11,6 +11,7 @@
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/lithium-inl.h"
#include "src/crankshaft/x64/lithium-codegen-x64.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index 9c932bc6ae..f526a19603 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -2198,12 +2198,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected & ToBooleanHint::kSimdValue) {
- // SIMD value -> true.
- __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
- __ j(equal, instr->TrueLabel(chunk_));
- }
-
if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@@ -4946,6 +4940,15 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Label deopt, done;
+ // If the map is not deprecated the migration attempt does not make sense.
+ __ push(object);
+ __ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ __ test(FieldOperand(object, Map::kBitField3Offset),
+ Immediate(Map::Deprecated::kMask));
+ __ pop(object);
+ __ j(zero, &deopt);
+
{
PushSafepointRegistersScope scope(this);
__ push(object);
@@ -4956,7 +4959,12 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
+ __ j(not_zero, &done);
+
+ __ bind(&deopt);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kInstanceMigrationFailed);
+
+ __ bind(&done);
}
@@ -5392,17 +5400,6 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
final_branch_condition = zero;
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(type_name, factory()->type##_string())) { \
- __ JumpIfSmi(input, false_label, false_distance); \
- __ cmp(FieldOperand(input, HeapObject::kMapOffset), \
- factory()->type##_map()); \
- final_branch_condition = equal;
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
-
} else {
__ jmp(false_label, false_distance);
}
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 368b33ecc7..64349f2761 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -30,6 +30,7 @@
#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
#include "src/basic-block-profiler.h"
+#include "src/debug/debug-interface.h"
#include "src/interpreter/interpreter.h"
#include "src/list-inl.h"
#include "src/msan.h"
@@ -65,6 +66,7 @@ namespace {
const int MB = 1024 * 1024;
const int kMaxWorkers = 50;
+const int kMaxSerializerMemoryUsage = 1 * MB; // Arbitrary maximum for testing.
#define USE_VM 1
#define VM_THRESHOLD 65536
@@ -407,10 +409,7 @@ Global<Function> Shell::stringify_function_;
base::LazyMutex Shell::workers_mutex_;
bool Shell::allow_new_workers_ = true;
i::List<Worker*> Shell::workers_;
-std::unordered_set<SharedArrayBuffer::Contents,
- Shell::SharedArrayBufferContentsHash,
- Shell::SharedArrayBufferContentsIsEqual>
- Shell::externalized_shared_contents_;
+std::vector<ExternalizedContents> Shell::externalized_contents_;
Global<Context> Shell::evaluation_context_;
ArrayBuffer::Allocator* Shell::array_buffer_allocator;
@@ -694,7 +693,9 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
}
ScriptOrigin origin(
String::NewFromUtf8(isolate, file_name.c_str(), NewStringType::kNormal)
- .ToLocalChecked());
+ .ToLocalChecked(),
+ Local<Integer>(), Local<Integer>(), Local<Boolean>(), Local<Integer>(),
+ Local<Value>(), Local<Boolean>(), Local<Boolean>(), True(isolate));
ScriptCompiler::Source source(source_text, origin);
Local<Module> module;
if (!ScriptCompiler::CompileModule(isolate, &source).ToLocal(&module)) {
@@ -1230,6 +1231,7 @@ void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
->Int32Value(args->GetIsolate()->GetCurrentContext())
.FromMaybe(0);
CleanupWorkers();
+ args->GetIsolate()->Exit();
OnExit(args->GetIsolate());
Exit(exit_code);
}
@@ -1690,8 +1692,68 @@ void Shell::WriteIgnitionDispatchCountersFile(v8::Isolate* isolate) {
JSON::Stringify(context, dispatch_counters).ToLocalChecked());
}
+// Write coverage data in LCOV format. See man page for geninfo(1).
+void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
+ if (!file) return;
+ HandleScope handle_scope(isolate);
+ debug::Coverage coverage = debug::Coverage::Collect(isolate, false);
+ std::ofstream sink(file, std::ofstream::app);
+ for (size_t i = 0; i < coverage.ScriptCount(); i++) {
+ debug::Coverage::ScriptData script_data = coverage.GetScriptData(i);
+ Local<debug::Script> script = script_data.GetScript();
+ // Skip unnamed scripts.
+ Local<String> name;
+ if (!script->Name().ToLocal(&name)) continue;
+ std::string file_name = ToSTLString(name);
+ // Skip scripts not backed by a file.
+ if (!std::ifstream(file_name).good()) continue;
+ sink << "SF:";
+ sink << NormalizePath(file_name, GetWorkingDirectory()) << std::endl;
+ std::vector<uint32_t> lines;
+ for (size_t j = 0; j < script_data.FunctionCount(); j++) {
+ debug::Coverage::FunctionData function_data =
+ script_data.GetFunctionData(j);
+ int start_line = function_data.Start().GetLineNumber();
+ int end_line = function_data.End().GetLineNumber();
+ uint32_t count = function_data.Count();
+ // Ensure space in the array.
+ lines.resize(std::max(static_cast<size_t>(end_line + 1), lines.size()),
+ 0);
+ // Boundary lines could be shared between two functions with different
+ // invocation counts. Take the maximum.
+ lines[start_line] = std::max(lines[start_line], count);
+ lines[end_line] = std::max(lines[end_line], count);
+ // Invocation counts for non-boundary lines are overwritten.
+ for (int k = start_line + 1; k < end_line; k++) lines[k] = count;
+ // Write function stats.
+ Local<String> name;
+ std::stringstream name_stream;
+ if (function_data.Name().ToLocal(&name)) {
+ name_stream << ToSTLString(name);
+ } else {
+ name_stream << "<" << start_line + 1 << "-";
+ name_stream << function_data.Start().GetColumnNumber() << ">";
+ }
+ sink << "FN:" << start_line + 1 << "," << name_stream.str() << std::endl;
+ sink << "FNDA:" << count << "," << name_stream.str() << std::endl;
+ }
+ // Write per-line coverage. LCOV uses 1-based line numbers.
+ for (size_t i = 0; i < lines.size(); i++) {
+ sink << "DA:" << (i + 1) << "," << lines[i] << std::endl;
+ }
+ sink << "end_of_record" << std::endl;
+ }
+}
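For reference, a single record appended by this writer has the shape below (values illustrative; "<1-0>" is the name synthesized for an anonymous function starting at line 1, column 0):

  SF:/abs/path/to/test.js
  FN:1,<1-0>
  FNDA:1,<1-0>
  DA:1,1
  DA:2,5
  end_of_record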
void Shell::OnExit(v8::Isolate* isolate) {
+ // Dump basic block profiling data.
+ if (i::BasicBlockProfiler* profiler =
+ reinterpret_cast<i::Isolate*>(isolate)->basic_block_profiler()) {
+ i::OFStream os(stdout);
+ os << *profiler;
+ }
+ isolate->Dispose();
+
if (i::FLAG_dump_counters || i::FLAG_dump_counters_nvp) {
int number_of_counters = 0;
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@@ -1750,7 +1812,6 @@ void Shell::OnExit(v8::Isolate* isolate) {
}
-
static FILE* FOpen(const char* path, const char* mode) {
#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
FILE* result;
@@ -2148,21 +2209,8 @@ void SourceGroup::JoinThread() {
thread_->Join();
}
-SerializationData::~SerializationData() {
- // Any ArrayBuffer::Contents are owned by this SerializationData object if
- // ownership hasn't been transferred out.
- // SharedArrayBuffer::Contents may be used by multiple threads, so must be
- // cleaned up by the main thread in Shell::CleanupWorkers().
- for (const auto& contents : array_buffer_contents_) {
- if (contents.Data()) {
- Shell::array_buffer_allocator->Free(contents.Data(),
- contents.ByteLength());
- }
- }
-}
-
-void SerializationData::ClearTransferredArrayBuffers() {
- array_buffer_contents_.clear();
+ExternalizedContents::~ExternalizedContents() {
+ Shell::array_buffer_allocator->Free(data_, size_);
}
void SerializationDataQueue::Enqueue(std::unique_ptr<SerializationData> data) {
@@ -2360,7 +2408,8 @@ bool Shell::SetOptions(int argc, char* argv[]) {
if (strcmp(argv[i], "--stress-opt") == 0) {
options.stress_opt = true;
argv[i] = NULL;
- } else if (strcmp(argv[i], "--nostress-opt") == 0) {
+ } else if (strcmp(argv[i], "--nostress-opt") == 0 ||
+ strcmp(argv[i], "--no-stress-opt") == 0) {
options.stress_opt = false;
argv[i] = NULL;
} else if (strcmp(argv[i], "--stress-deopt") == 0) {
@@ -2369,7 +2418,8 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--mock-arraybuffer-allocator") == 0) {
options.mock_arraybuffer_allocator = true;
argv[i] = NULL;
- } else if (strcmp(argv[i], "--noalways-opt") == 0) {
+ } else if (strcmp(argv[i], "--noalways-opt") == 0 ||
+ strcmp(argv[i], "--no-always-opt") == 0) {
// No support for stressing if we can't use --always-opt.
options.stress_opt = false;
options.stress_deopt = false;
@@ -2443,6 +2493,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--enable-inspector") == 0) {
options.enable_inspector = true;
argv[i] = NULL;
+ } else if (strncmp(argv[i], "--lcov=", 7) == 0) {
+ options.lcov_file = argv[i] + 7;
+ argv[i] = NULL;
}
}
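A coverage run then becomes "d8 --lcov=coverage.info test.js". Since WriteLcovData opens its sink with std::ofstream::app, repeated runs append records to the same file, which standard LCOV tooling such as genhtml(1) can aggregate and render.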
@@ -2484,6 +2537,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
options.isolate_sources[i].StartExecuteInThread();
}
{
+ if (options.lcov_file) debug::Coverage::TogglePrecise(isolate, true);
HandleScope scope(isolate);
Local<Context> context = CreateEvaluationContext(isolate);
if (last_run && options.use_interactive_shell()) {
@@ -2497,6 +2551,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
options.isolate_sources[0].Execute(isolate);
}
DisposeModuleEmbedderData(context);
+ WriteLcovData(isolate, options.lcov_file);
}
CollectGarbage(isolate);
for (int i = 1; i < options.num_isolates; ++i) {
@@ -2538,7 +2593,9 @@ void Shell::EmptyMessageQueues(Isolate* isolate) {
class Serializer : public ValueSerializer::Delegate {
public:
explicit Serializer(Isolate* isolate)
- : isolate_(isolate), serializer_(isolate, this) {}
+ : isolate_(isolate),
+ serializer_(isolate, this),
+ current_memory_usage_(0) {}
Maybe<bool> WriteValue(Local<Context> context, Local<Value> value,
Local<Value> transfer) {
@@ -2589,6 +2646,11 @@ class Serializer : public ValueSerializer::Delegate {
void* ReallocateBufferMemory(void* old_buffer, size_t size,
size_t* actual_size) override {
+ // Not accurate, because we don't take into account reallocated buffers,
+ // but this is fine for testing.
+ current_memory_usage_ += size;
+ if (current_memory_usage_ > kMaxSerializerMemoryUsage) return nullptr;
+
void* result = realloc(old_buffer, size);
*actual_size = result ? size : 0;
return result;
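Returning nullptr here makes the in-progress serialization fail as an out-of-memory condition instead of growing without bound. A standalone sketch of the same capping pattern for an arbitrary embedder (class name and cap are illustrative, not part of this patch; assumes <cstdlib> and include/v8.h):

  class CappingDelegate : public v8::ValueSerializer::Delegate {
   public:
    void ThrowDataCloneError(v8::Local<v8::String> message) override {
      // A real embedder would surface this as a DataCloneError exception.
    }
    void* ReallocateBufferMemory(void* old_buffer, size_t size,
                                 size_t* actual_size) override {
      used_ += size;  // Over-counts across reallocations, as above.
      if (used_ > kCap) return nullptr;  // Reported as allocation failure.
      void* result = realloc(old_buffer, size);
      *actual_size = result ? size : 0;
      return result;
    }
   private:
    static const size_t kCap = 1024 * 1024;
    size_t used_ = 0;
  };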
@@ -2626,6 +2688,17 @@ class Serializer : public ValueSerializer::Delegate {
}
}
+ template <typename T>
+ typename T::Contents MaybeExternalize(Local<T> array_buffer) {
+ if (array_buffer->IsExternal()) {
+ return array_buffer->GetContents();
+ } else {
+ typename T::Contents contents = array_buffer->Externalize();
+ data_->externalized_contents_.emplace_back(contents);
+ return contents;
+ }
+ }
+
Maybe<bool> FinalizeTransfer() {
for (const auto& global_array_buffer : array_buffers_) {
Local<ArrayBuffer> array_buffer =
@@ -2635,10 +2708,7 @@ class Serializer : public ValueSerializer::Delegate {
return Nothing<bool>();
}
- if (!array_buffer->IsExternal()) {
- array_buffer->Externalize();
- }
- ArrayBuffer::Contents contents = array_buffer->GetContents();
+ ArrayBuffer::Contents contents = MaybeExternalize(array_buffer);
array_buffer->Neuter();
data_->array_buffer_contents_.push_back(contents);
}
@@ -2646,11 +2716,8 @@ class Serializer : public ValueSerializer::Delegate {
for (const auto& global_shared_array_buffer : shared_array_buffers_) {
Local<SharedArrayBuffer> shared_array_buffer =
Local<SharedArrayBuffer>::New(isolate_, global_shared_array_buffer);
- if (!shared_array_buffer->IsExternal()) {
- shared_array_buffer->Externalize();
- }
data_->shared_array_buffer_contents_.push_back(
- shared_array_buffer->GetContents());
+ MaybeExternalize(shared_array_buffer));
}
return Just(true);
@@ -2661,6 +2728,7 @@ class Serializer : public ValueSerializer::Delegate {
std::unique_ptr<SerializationData> data_;
std::vector<Global<ArrayBuffer>> array_buffers_;
std::vector<Global<SharedArrayBuffer>> shared_array_buffers_;
+ size_t current_memory_usage_;
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
@@ -2694,11 +2762,7 @@ class Deserializer : public ValueDeserializer::Delegate {
deserializer_.TransferSharedArrayBuffer(index++, shared_array_buffer);
}
- MaybeLocal<Value> result = deserializer_.ReadValue(context);
- if (!result.IsEmpty()) {
- data_->ClearTransferredArrayBuffers();
- }
- return result;
+ return deserializer_.ReadValue(context);
}
private:
@@ -2717,9 +2781,7 @@ std::unique_ptr<SerializationData> Shell::SerializeValue(
if (serializer.WriteValue(context, value, transfer).To(&ok)) {
std::unique_ptr<SerializationData> data = serializer.Release();
base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
- for (const auto& contents : data->shared_array_buffer_contents()) {
- externalized_shared_contents_.insert(contents);
- }
+ data->AppendExternalizedContentsTo(&externalized_contents_);
return data;
}
return nullptr;
@@ -2755,11 +2817,7 @@ void Shell::CleanupWorkers() {
// Now that all workers are terminated, we can re-enable Worker creation.
base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
allow_new_workers_ = true;
-
- for (const auto& contents : externalized_shared_contents_) {
- Shell::array_buffer_allocator->Free(contents.Data(), contents.ByteLength());
- }
- externalized_shared_contents_.clear();
+ externalized_contents_.clear();
}
@@ -2962,13 +3020,6 @@ int Shell::Main(int argc, char* argv[]) {
CollectGarbage(isolate);
}
OnExit(isolate);
- // Dump basic block profiling data.
- if (i::BasicBlockProfiler* profiler =
- reinterpret_cast<i::Isolate*>(isolate)->basic_block_profiler()) {
- i::OFStream os(stdout);
- os << *profiler;
- }
- isolate->Dispose();
V8::Dispose();
V8::ShutdownPlatform();
delete g_platform;
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 558b8bb58d..21e4c4f083 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -5,13 +5,12 @@
#ifndef V8_D8_H_
#define V8_D8_H_
+#include <iterator>
#include <memory>
#include <string>
-#include <unordered_set>
#include <vector>
#include "src/allocation.h"
-#include "src/base/functional.h"
#include "src/base/hashmap.h"
#include "src/base/platform/time.h"
#include "src/list.h"
@@ -147,11 +146,40 @@ class SourceGroup {
int end_offset_;
};
+// The backing store of an ArrayBuffer or SharedArrayBuffer, after
+// Externalize() has been called on it.
+class ExternalizedContents {
+ public:
+ explicit ExternalizedContents(const ArrayBuffer::Contents& contents)
+ : data_(contents.Data()), size_(contents.ByteLength()) {}
+ explicit ExternalizedContents(const SharedArrayBuffer::Contents& contents)
+ : data_(contents.Data()), size_(contents.ByteLength()) {}
+ ExternalizedContents(ExternalizedContents&& other)
+ : data_(other.data_), size_(other.size_) {
+ other.data_ = nullptr;
+ other.size_ = 0;
+ }
+ ExternalizedContents& operator=(ExternalizedContents&& other) {
+ if (this != &other) {
+ data_ = other.data_;
+ size_ = other.size_;
+ other.data_ = nullptr;
+ other.size_ = 0;
+ }
+ return *this;
+ }
+ ~ExternalizedContents();
+
+ private:
+ void* data_;
+ size_t size_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalizedContents);
+};
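A minimal usage sketch, assuming a live Local<ArrayBuffer> named buffer: the move-only design guarantees the backing store is freed exactly once, when the owning vector is cleared.

  std::vector<ExternalizedContents> owned;
  ArrayBuffer::Contents contents = buffer->Externalize();
  owned.emplace_back(contents);  // ExternalizedContents takes ownership.
  owned.clear();                 // Destructor frees the backing store.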
class SerializationData {
public:
- SerializationData() : data_(nullptr), size_(0) {}
- ~SerializationData();
+ SerializationData() : size_(0) {}
uint8_t* data() { return data_.get(); }
size_t size() { return size_; }
@@ -163,7 +191,12 @@ class SerializationData {
return shared_array_buffer_contents_;
}
- void ClearTransferredArrayBuffers();
+ void AppendExternalizedContentsTo(std::vector<ExternalizedContents>* to) {
+ to->insert(to->end(),
+ std::make_move_iterator(externalized_contents_.begin()),
+ std::make_move_iterator(externalized_contents_.end()));
+ externalized_contents_.clear();
+ }
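The insert/clear pair is the usual idiom for splicing move-only elements between vectors; in miniature, with std::unique_ptr standing in for ExternalizedContents (headers <memory>, <vector>, <iterator> assumed):

  std::vector<std::unique_ptr<int>> src, dst;
  src.push_back(std::make_unique<int>(42));
  dst.insert(dst.end(), std::make_move_iterator(src.begin()),
             std::make_move_iterator(src.end()));
  src.clear();  // Drops the moved-from shells; nothing is freed twice.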
private:
struct DataDeleter {
@@ -174,6 +207,7 @@ class SerializationData {
size_t size_;
std::vector<ArrayBuffer::Contents> array_buffer_contents_;
std::vector<SharedArrayBuffer::Contents> shared_array_buffer_contents_;
+ std::vector<ExternalizedContents> externalized_contents_;
private:
friend class Serializer;
@@ -270,7 +304,8 @@ class ShellOptions {
natives_blob(NULL),
snapshot_blob(NULL),
trace_enabled(false),
- trace_config(NULL) {}
+ trace_config(NULL),
+ lcov_file(NULL) {}
~ShellOptions() {
delete[] isolate_sources;
@@ -301,6 +336,7 @@ class ShellOptions {
const char* snapshot_blob;
bool trace_enabled;
const char* trace_config;
+ const char* lcov_file;
};
class Shell : public i::AllStatic {
@@ -425,28 +461,14 @@ class Shell : public i::AllStatic {
static base::LazyMutex context_mutex_;
static const base::TimeTicks kInitialTicks;
- struct SharedArrayBufferContentsHash {
- size_t operator()(const v8::SharedArrayBuffer::Contents& contents) const {
- return base::hash_combine(contents.Data(), contents.ByteLength());
- }
- };
-
- struct SharedArrayBufferContentsIsEqual {
- bool operator()(const SharedArrayBuffer::Contents& a,
- const SharedArrayBuffer::Contents& b) const {
- return a.Data() == b.Data() && a.ByteLength() == b.ByteLength();
- }
- };
-
static base::LazyMutex workers_mutex_;
static bool allow_new_workers_;
static i::List<Worker*> workers_;
- static std::unordered_set<SharedArrayBuffer::Contents,
- SharedArrayBufferContentsHash,
- SharedArrayBufferContentsIsEqual>
- externalized_shared_contents_;
+ static std::vector<ExternalizedContents> externalized_contents_;
static void WriteIgnitionDispatchCountersFile(v8::Isolate* isolate);
+ // Append LCOV coverage data to file.
+ static void WriteLcovData(v8::Isolate* isolate, const char* file);
static Counter* GetCounter(const char* name, bool is_histogram);
static Local<String> Stringify(Isolate* isolate, Local<Value> value);
static void Initialize(Isolate* isolate);
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index 47a7c6e7ff..fd4bed2df6 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -343,8 +343,13 @@ DateParser::DateToken DateParser::ParseES5DateTime(
}
if (!scanner->Peek().IsEndOfInput()) return DateToken::Invalid();
}
- // Successfully parsed ES5 Date Time String. Default to UTC if no TZ given.
- if (tz->IsEmpty()) tz->Set(0);
+ // Successfully parsed ES5 Date Time String.
+ // ES#sec-date-time-string-format Date Time String Format
+ // "When the time zone offset is absent, date-only forms are interpreted
+ // as a UTC time and date-time forms are interpreted as a local time."
+ if (tz->IsEmpty() && time->IsEmpty()) {
+ tz->Set(0);
+ }
day->set_iso_date();
return DateToken::EndOfInput();
}
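Concretely, new Date('2017-03-10') (date-only) is still interpreted as UTC midnight, while new Date('2017-03-10T10:00') (date-time without an offset) now yields 10:00 local time rather than 10:00 UTC.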
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index 145f371f99..4839282e87 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -75,14 +75,6 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Load padding words on stack.
- __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ push(ip);
- }
- __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
- __ push(ip);
-
// Push arguments for DebugBreak call.
if (mode == SAVE_RESULT_REGISTER) {
// Break on return.
@@ -109,50 +101,45 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
}
}
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
// Leave the internal frame.
}
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ mov(ip, Operand(after_break_target));
- __ ldr(ip, MemOperand(ip));
- __ Jump(ip);
+ __ MaybeDropFrames();
+
+ // Return to caller.
+ __ Ret();
}
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+ }
+ __ MaybeDropFrames();
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- // Load the function pointer off of our current stack frame.
- __ ldr(r1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
+ // Return to caller.
+ __ Ret();
+}
- // Pop return address, frame and constant pool pointer (if
- // FLAG_enable_embedded_constant_pool).
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+ // Frame is being dropped:
+ // - Drop to the target frame specified by r1.
+ // - Look up current function on the frame.
+ // - Leave the frame.
+ // - Restart the frame by calling the function.
+ __ mov(fp, r1);
+ __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LeaveFrame(StackFrame::INTERNAL);
- ParameterCount dummy(0);
- __ CheckDebugHook(r1, no_reg, dummy, dummy);
-
- { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- // Load context from the function.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r0,
+ FieldMemOperand(r0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(r2, r0);
- // Clear new.target as a safety measure.
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
-
- // Get function code.
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Re-run JSFunction, r1 is function, cp is context.
- __ Jump(ip);
- }
+ ParameterCount dummy1(r2);
+ ParameterCount dummy2(r0);
+ __ InvokeFunction(r1, dummy1, dummy2, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
}
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index 75eb2837c2..06929c63b6 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -88,12 +88,6 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Load padding words on stack.
- __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue));
- __ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize);
- __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
- __ Push(scratch);
-
// Push arguments for DebugBreak call.
if (mode == SAVE_RESULT_REGISTER) {
// Break on return.
@@ -119,52 +113,48 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
}
}
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
// Leave the internal frame.
}
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ Mov(scratch, after_break_target);
- __ Ldr(scratch, MemOperand(scratch));
- __ Br(scratch);
+ __ MaybeDropFrames();
+
+ // Return to caller.
+ __ Ret();
}
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+ }
+ __ MaybeDropFrames();
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- // We do not know our frame height, but set sp based on fp.
- __ Add(masm->StackPointer(), fp, FrameDropperFrameConstants::kFunctionOffset);
+ // Return to caller.
+ __ Ret();
+}
+
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+ // Frame is being dropped:
+ // - Drop to the target frame specified by x1.
+ // - Look up current function on the frame.
+ // - Leave the frame.
+ // - Restart the frame by calling the function.
+ __ Mov(fp, x1);
__ AssertStackConsistency();
+ __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Pop(x1); // Function
__ Mov(masm->StackPointer(), Operand(fp));
__ Pop(fp, lr); // Frame, Return address.
- ParameterCount dummy(0);
- __ CheckDebugHook(x1, no_reg, dummy, dummy);
-
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
-
- // Load context from the function.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
- // Clear new.target as a safety measure.
- __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
-
- // Get function code.
- __ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
- __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+ __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x0,
+ FieldMemOperand(x0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(x2, x0);
- // Re-run JSFunction, x1 is function, cp is context.
- __ Br(scratch);
+ ParameterCount dummy1(x2);
+ ParameterCount dummy2(x0);
+ __ InvokeFunction(x1, dummy1, dummy2, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
}
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
new file mode 100644
index 0000000000..8a13b6c92d
--- /dev/null
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -0,0 +1,169 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/debug-coverage.h"
+
+#include "src/base/hashmap.h"
+#include "src/deoptimizer.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class SharedToCounterMap
+ : public base::TemplateHashMapImpl<SharedFunctionInfo*, uint32_t,
+ base::KeyEqualityMatcher<void*>,
+ base::DefaultAllocationPolicy> {
+ public:
+ typedef base::TemplateHashMapEntry<SharedFunctionInfo*, uint32_t> Entry;
+ inline void Add(SharedFunctionInfo* key, uint32_t count) {
+ Entry* entry = LookupOrInsert(key, Hash(key), []() { return 0; });
+ uint32_t old_count = entry->value;
+ if (UINT32_MAX - count < old_count) {
+ entry->value = UINT32_MAX;
+ } else {
+ entry->value = old_count + count;
+ }
+ }
+
+ inline uint32_t Get(SharedFunctionInfo* key) {
+ Entry* entry = Lookup(key, Hash(key));
+ if (entry == nullptr) return 0;
+ return entry->value;
+ }
+
+ private:
+ static uint32_t Hash(SharedFunctionInfo* key) {
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(key));
+ }
+
+ DisallowHeapAllocation no_gc;
+};
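Note that Add() saturates rather than wraps: if an entry already holds UINT32_MAX - 5 and another 10 invocations arrive, the stored count is pinned at UINT32_MAX.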
+
+namespace {
+int StartPosition(SharedFunctionInfo* info) {
+ int start = info->function_token_position();
+ if (start == kNoSourcePosition) start = info->start_position();
+ return start;
+}
+
+bool CompareSharedFunctionInfo(SharedFunctionInfo* a, SharedFunctionInfo* b) {
+ int a_start = StartPosition(a);
+ int b_start = StartPosition(b);
+ if (a_start == b_start) return a->end_position() > b->end_position();
+ return a_start < b_start;
+}
+} // anonymous namespace
+
+Coverage* Coverage::Collect(Isolate* isolate, bool reset_count) {
+ SharedToCounterMap counter_map;
+
+ // Feed invocation count into the counter map.
+ if (isolate->IsCodeCoverageEnabled()) {
+ // Feedback vectors are already listed to prevent losing them to GC.
+ Handle<ArrayList> list =
+ Handle<ArrayList>::cast(isolate->factory()->code_coverage_list());
+ for (int i = 0; i < list->Length(); i++) {
+ FeedbackVector* vector = FeedbackVector::cast(list->Get(i));
+ SharedFunctionInfo* shared = vector->shared_function_info();
+ DCHECK(shared->IsSubjectToDebugging());
+ uint32_t count = static_cast<uint32_t>(vector->invocation_count());
+ if (reset_count) vector->clear_invocation_count();
+ counter_map.Add(shared, count);
+ }
+ } else {
+ // Iterate the heap to find all feedback vectors and accumulate the
+ // invocation counts into the map for each shared function info.
+ HeapIterator heap_iterator(isolate->heap());
+ while (HeapObject* current_obj = heap_iterator.next()) {
+ if (!current_obj->IsFeedbackVector()) continue;
+ FeedbackVector* vector = FeedbackVector::cast(current_obj);
+ SharedFunctionInfo* shared = vector->shared_function_info();
+ if (!shared->IsSubjectToDebugging()) continue;
+ uint32_t count = static_cast<uint32_t>(vector->invocation_count());
+ if (reset_count) vector->clear_invocation_count();
+ counter_map.Add(shared, count);
+ }
+ }
+
+ // Iterate shared function infos of every script and build a mapping
+ // between source ranges and invocation counts.
+ Coverage* result = new Coverage();
+ Script::Iterator scripts(isolate);
+ while (Script* script = scripts.Next()) {
+ // Dismiss non-user scripts.
+ if (script->type() != Script::TYPE_NORMAL) continue;
+
+ // Create and add new script data.
+ Handle<Script> script_handle(script, isolate);
+ result->emplace_back(isolate, script_handle);
+ std::vector<CoverageFunction>* functions = &result->back().functions;
+
+ std::vector<SharedFunctionInfo*> sorted;
+ bool has_toplevel = false;
+
+ {
+ // Sort functions by start position, from outer to inner functions.
+ SharedFunctionInfo::ScriptIterator infos(script_handle);
+ while (SharedFunctionInfo* info = infos.Next()) {
+ has_toplevel |= info->is_toplevel();
+ sorted.push_back(info);
+ }
+ std::sort(sorted.begin(), sorted.end(), CompareSharedFunctionInfo);
+ }
+
+ functions->reserve(sorted.size() + (has_toplevel ? 0 : 1));
+
+ if (!has_toplevel) {
+ // Add a replacement toplevel function if it does not exist.
+ int source_end = String::cast(script->source())->length();
+ functions->emplace_back(0, source_end, 1u,
+ isolate->factory()->empty_string());
+ }
+
+ // Use sorted list to reconstruct function nesting.
+ for (SharedFunctionInfo* info : sorted) {
+ int start = StartPosition(info);
+ int end = info->end_position();
+ uint32_t count = counter_map.Get(info);
+ Handle<String> name(info->DebugName(), isolate);
+ functions->emplace_back(start, end, count, name);
+ }
+ }
+ return result;
+}
+
+void Coverage::TogglePrecise(Isolate* isolate, bool enable) {
+ if (enable) {
+ HandleScope scope(isolate);
+    // Remove all optimized functions. Optimized and inlined functions do not
+    // increment the invocation count.
+ Deoptimizer::DeoptimizeAll(isolate);
+ // Collect existing feedback vectors.
+ std::vector<Handle<FeedbackVector>> vectors;
+ {
+ HeapIterator heap_iterator(isolate->heap());
+ while (HeapObject* current_obj = heap_iterator.next()) {
+ if (!current_obj->IsFeedbackVector()) continue;
+ FeedbackVector* vector = FeedbackVector::cast(current_obj);
+ SharedFunctionInfo* shared = vector->shared_function_info();
+ if (!shared->IsSubjectToDebugging()) continue;
+ vector->clear_invocation_count();
+ vectors.emplace_back(vector, isolate);
+ }
+ }
+ // Add collected feedback vectors to the root list lest we lose them to GC.
+ Handle<ArrayList> list =
+ ArrayList::New(isolate, static_cast<int>(vectors.size()));
+ for (const auto& vector : vectors) list = ArrayList::Add(list, vector);
+ isolate->SetCodeCoverageList(*list);
+ } else {
+ isolate->SetCodeCoverageList(isolate->heap()->undefined_value());
+ }
+}
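The intended call sequence is therefore TogglePrecise(isolate, true) before user code runs, followed by Collect(isolate, ...) afterwards; d8 follows exactly this order when --lcov is given.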
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug/debug-coverage.h b/deps/v8/src/debug/debug-coverage.h
new file mode 100644
index 0000000000..36128bc8a9
--- /dev/null
+++ b/deps/v8/src/debug/debug-coverage.h
@@ -0,0 +1,53 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_COVERAGE_H_
+#define V8_DEBUG_DEBUG_COVERAGE_H_
+
+#include <vector>
+
+#include "src/debug/debug-interface.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration.
+class Isolate;
+
+struct CoverageFunction {
+ CoverageFunction(int s, int e, uint32_t c, Handle<String> n)
+ : start(s), end(e), count(c), name(n) {}
+ int start;
+ int end;
+ uint32_t count;
+ Handle<String> name;
+};
+
+struct CoverageScript {
+ // Initialize top-level function in case it has been garbage-collected.
+ CoverageScript(Isolate* isolate, Handle<Script> s) : script(s) {}
+ Handle<Script> script;
+ // Functions are sorted by start position, from outer to inner function.
+ std::vector<CoverageFunction> functions;
+};
+
+class Coverage : public std::vector<CoverageScript> {
+ public:
+ // Allocate a new Coverage object and populate with result.
+ // The ownership is transferred to the caller.
+ static Coverage* Collect(Isolate* isolate, bool reset_count);
+
+ // Enable precise code coverage. This disables optimization and makes sure
+ // invocation count is not affected by GC.
+ static void TogglePrecise(Isolate* isolate, bool enable);
+
+ private:
+ Coverage() {}
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_DEBUG_COVERAGE_H_
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 96cd98d3f2..c6fafa557b 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -41,13 +41,14 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
Handle<Context> context = isolate->native_context();
Handle<JSObject> receiver(context->global_proxy());
Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
- return Evaluate(isolate, outer_info, context, receiver, source);
+ return Evaluate(isolate, outer_info, context, receiver, source, false);
}
MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
StackFrame::Id frame_id,
int inlined_jsframe_index,
- Handle<String> source) {
+ Handle<String> source,
+ bool throw_on_side_effect) {
// Handle the processing of break.
DisableBreak disable_break_scope(isolate->debug());
@@ -74,8 +75,9 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
Handle<Context> context = context_builder.evaluation_context();
Handle<JSObject> receiver(context->global_proxy());
- MaybeHandle<Object> maybe_result = Evaluate(
- isolate, context_builder.outer_info(), context, receiver, source);
+ MaybeHandle<Object> maybe_result =
+ Evaluate(isolate, context_builder.outer_info(), context, receiver, source,
+ throw_on_side_effect);
if (!maybe_result.is_null()) context_builder.UpdateValues();
return maybe_result;
}
@@ -84,19 +86,19 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
// Compile and evaluate source for the given context.
MaybeHandle<Object> DebugEvaluate::Evaluate(
Isolate* isolate, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, Handle<Object> receiver, Handle<String> source) {
+ Handle<Context> context, Handle<Object> receiver, Handle<String> source,
+ bool throw_on_side_effect) {
Handle<JSFunction> eval_fun;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, eval_fun,
Compiler::GetFunctionFromEval(source, outer_info, context, SLOPPY,
NO_PARSE_RESTRICTION, kNoSourcePosition,
- kNoSourcePosition),
+ kNoSourcePosition, kNoSourcePosition),
Object);
Handle<Object> result;
{
- NoSideEffectScope no_side_effect(isolate,
- FLAG_side_effect_free_debug_evaluate);
+ NoSideEffectScope no_side_effect(isolate, throw_on_side_effect);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result, Execution::Call(isolate, eval_fun, receiver, 0, NULL),
Object);
@@ -269,10 +271,33 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
case Runtime::kInlineToString:
case Runtime::kToLength:
case Runtime::kInlineToLength:
+ case Runtime::kToNumber:
+ // Type checks.
+ case Runtime::kIsJSReceiver:
+ case Runtime::kInlineIsJSReceiver:
+ case Runtime::kIsSmi:
+ case Runtime::kInlineIsSmi:
+ case Runtime::kIsArray:
+ case Runtime::kInlineIsArray:
+ case Runtime::kIsFunction:
+ case Runtime::kIsDate:
+ case Runtime::kIsJSProxy:
+ case Runtime::kIsRegExp:
+ case Runtime::kIsTypedArray:
// Loads.
case Runtime::kLoadLookupSlotForCall:
+ // Arrays.
+ case Runtime::kArraySpeciesConstructor:
+ case Runtime::kNormalizeElements:
+ case Runtime::kGetArrayKeys:
+ case Runtime::kHasComplexElements:
+ case Runtime::kEstimateNumberOfElements:
// Errors.
+ case Runtime::kReThrow:
case Runtime::kThrowReferenceError:
+ case Runtime::kThrowSymbolIteratorInvalid:
+ case Runtime::kThrowIteratorResultNotAnObject:
+ case Runtime::kNewTypeError:
// Strings.
case Runtime::kInlineStringCharCodeAt:
case Runtime::kStringCharCodeAt:
@@ -280,14 +305,13 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
case Runtime::kStringReplaceOneCharWithString:
case Runtime::kSubString:
case Runtime::kInlineSubString:
- case Runtime::kStringToLowerCase:
- case Runtime::kStringToUpperCase:
case Runtime::kRegExpInternalReplace:
// Literals.
case Runtime::kCreateArrayLiteral:
case Runtime::kCreateObjectLiteral:
case Runtime::kCreateRegExpLiteral:
// Misc.
+ case Runtime::kForInPrepare:
case Runtime::kInlineCall:
case Runtime::kCall:
case Runtime::kInlineMaxSmi:
@@ -306,7 +330,7 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
typedef interpreter::Bytecode Bytecode;
typedef interpreter::Bytecodes Bytecodes;
if (Bytecodes::IsWithoutExternalSideEffects(bytecode)) return true;
- if (Bytecodes::IsCallOrNew(bytecode)) return true;
+ if (Bytecodes::IsCallOrConstruct(bytecode)) return true;
if (Bytecodes::WritesBooleanToAccumulator(bytecode)) return true;
if (Bytecodes::IsJumpIfToBoolean(bytecode)) return true;
if (Bytecodes::IsPrefixScalingBytecode(bytecode)) return true;
@@ -350,9 +374,19 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
case Bytecode::kCreateArrayLiteral:
case Bytecode::kCreateObjectLiteral:
case Bytecode::kCreateRegExpLiteral:
- // Misc.
+ // Allocations.
+ case Bytecode::kCreateClosure:
case Bytecode::kCreateUnmappedArguments:
+ // Conversions.
+ case Bytecode::kToObject:
+ case Bytecode::kToNumber:
+ // Misc.
+ case Bytecode::kForInPrepare:
+ case Bytecode::kForInContinue:
+ case Bytecode::kForInNext:
+ case Bytecode::kForInStep:
case Bytecode::kThrow:
+ case Bytecode::kReThrow:
case Bytecode::kIllegal:
case Bytecode::kCallJSRuntime:
case Bytecode::kStackCheck:
@@ -371,6 +405,14 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
bool BuiltinHasNoSideEffect(Builtins::Name id) {
switch (id) {
// Whitelist for builtins.
+ // Array builtins.
+ case Builtins::kArrayCode:
+ case Builtins::kArrayIndexOf:
+ case Builtins::kArrayPrototypeValues:
+ case Builtins::kArrayIncludes:
+ case Builtins::kArrayPrototypeEntries:
+ case Builtins::kArrayPrototypeKeys:
+ case Builtins::kArrayForEach:
// Math builtins.
case Builtins::kMathAbs:
case Builtins::kMathAcos:
@@ -434,6 +476,8 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kStringPrototypeSubstr:
case Builtins::kStringPrototypeSubstring:
case Builtins::kStringPrototypeToString:
+ case Builtins::kStringPrototypeToLowerCase:
+ case Builtins::kStringPrototypeToUpperCase:
case Builtins::kStringPrototypeTrim:
case Builtins::kStringPrototypeTrimLeft:
case Builtins::kStringPrototypeTrimRight:
@@ -441,6 +485,12 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
// JSON builtins.
case Builtins::kJsonParse:
case Builtins::kJsonStringify:
+ // Error builtins.
+ case Builtins::kMakeError:
+ case Builtins::kMakeTypeError:
+ case Builtins::kMakeSyntaxError:
+ case Builtins::kMakeRangeError:
+ case Builtins::kMakeURIError:
return true;
default:
if (FLAG_trace_side_effect_free_debug_evaluate) {
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 3b4d1f4640..5f5b51e329 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -22,7 +22,8 @@ class DebugEvaluate : public AllStatic {
// - The arguments object needs to materialized.
static MaybeHandle<Object> Local(Isolate* isolate, StackFrame::Id frame_id,
int inlined_jsframe_index,
- Handle<String> source);
+ Handle<String> source,
+ bool throw_on_side_effect);
static bool FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info);
static bool CallbackHasNoSideEffect(Address function_addr);
@@ -86,7 +87,8 @@ class DebugEvaluate : public AllStatic {
Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
Handle<Object> receiver,
- Handle<String> source);
+ Handle<String> source,
+ bool throw_on_side_effect);
};
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 15d6ed5b4d..d4899114c9 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -15,7 +15,6 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
Isolate* isolate)
: frame_(frame),
frame_summary_(FrameSummary::Get(frame, inlined_frame_index)),
- deoptimized_frame_(nullptr),
isolate_(isolate) {
JavaScriptFrame* js_frame =
frame->is_java_script() ? javascript_frame() : nullptr;
@@ -35,21 +34,28 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
return;
}
- deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
- js_frame, inlined_frame_index, isolate);
+ deoptimized_frame_.reset(Deoptimizer::DebuggerInspectableFrame(
+ js_frame, inlined_frame_index, isolate));
+ } else if (frame_->is_wasm_interpreter_entry()) {
+ wasm_interpreted_frame_ =
+ frame_summary_.AsWasm()
+ .wasm_instance()
+ ->debug_info()
+ ->GetInterpretedFrame(frame_->fp(), inlined_frame_index);
+ DCHECK(wasm_interpreted_frame_);
}
}
FrameInspector::~FrameInspector() {
- // Get rid of the calculated deoptimized frame if any.
- if (deoptimized_frame_ != nullptr) {
- delete deoptimized_frame_;
- }
+ // Destructor needs to be defined in the .cc file, because it instantiates
+ // std::unique_ptr destructors but the types are not known in the header.
}
int FrameInspector::GetParametersCount() {
- return is_optimized_ ? deoptimized_frame_->parameters_count()
- : frame_->ComputeParametersCount();
+ if (is_optimized_) return deoptimized_frame_->parameters_count();
+ if (wasm_interpreted_frame_)
+ return wasm_interpreted_frame_->GetParameterCount();
+ return frame_->ComputeParametersCount();
}
Handle<Script> FrameInspector::GetScript() {
@@ -61,8 +67,9 @@ Handle<JSFunction> FrameInspector::GetFunction() {
}
Handle<Object> FrameInspector::GetParameter(int index) {
- return is_optimized_ ? deoptimized_frame_->GetParameter(index)
- : handle(frame_->GetParameter(index), isolate_);
+ if (is_optimized_) return deoptimized_frame_->GetParameter(index);
+ // TODO(clemensh): Handle wasm_interpreted_frame_.
+ return handle(frame_->GetParameter(index), isolate_);
}
Handle<Object> FrameInspector::GetExpression(int index) {
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 2793693774..2c9e43ff88 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -13,6 +13,11 @@
namespace v8 {
namespace internal {
+// Forward declaration:
+namespace wasm {
+class InterpretedFrame;
+}
+
class FrameInspector {
public:
FrameInspector(StandardFrame* frame, int inlined_frame_index,
@@ -54,7 +59,8 @@ class FrameInspector {
StandardFrame* frame_;
FrameSummary frame_summary_;
- DeoptimizedFrameInfo* deoptimized_frame_;
+ std::unique_ptr<DeoptimizedFrameInfo> deoptimized_frame_;
+ std::unique_ptr<wasm::InterpretedFrame> wasm_interpreted_frame_;
Isolate* isolate_;
bool is_optimized_;
bool is_interpreted_;
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 2e8abc6e54..be8ed902d5 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -12,55 +12,18 @@
#include "include/v8.h"
#include "src/debug/interface-types.h"
+#include "src/globals.h"
namespace v8 {
-namespace debug {
-/**
- * An event details object passed to the debug event listener.
- */
-class EventDetails : public v8::Debug::EventDetails {
- public:
- /**
- * Event type.
- */
- virtual v8::DebugEvent GetEvent() const = 0;
-
- /**
- * Access to execution state and event data of the debug event. Don't store
- * these cross callbacks as their content becomes invalid.
- */
- virtual Local<Object> GetExecutionState() const = 0;
- virtual Local<Object> GetEventData() const = 0;
-
- /**
- * Get the context active when the debug event happened. Note this is not
- * the current active context as the JavaScript part of the debugger is
- * running in its own context which is entered at this point.
- */
- virtual Local<Context> GetEventContext() const = 0;
-
- /**
- * Client data passed with the corresponding callback when it was
- * registered.
- */
- virtual Local<Value> GetCallbackData() const = 0;
-
- virtual ~EventDetails() {}
-};
+namespace internal {
+struct CoverageFunction;
+struct CoverageScript;
+class Coverage;
+class Script;
+}
-/**
- * Debug event callback function.
- *
- * \param event_details object providing information about the debug event
- *
- * A EventCallback does not take possession of the event data,
- * and must not rely on the data persisting after the handler returns.
- */
-typedef void (*EventCallback)(const EventDetails& event_details);
-
-bool SetDebugEventListener(Isolate* isolate, EventCallback that,
- Local<Value> data = Local<Value>());
+namespace debug {
/**
* Debugger is running in its own context which is entered while debugger
@@ -129,16 +92,18 @@ enum ExceptionBreakState {
*/
void ChangeBreakOnException(Isolate* isolate, ExceptionBreakState state);
+void SetBreakPointsActive(Isolate* isolate, bool is_active);
+
enum StepAction {
StepOut = 0, // Step out of the current function.
StepNext = 1, // Step to the next statement in the current function.
- StepIn = 2, // Step into new functions invoked or the next statement
+ StepIn = 2 // Step into new functions invoked or the next statement
// in the current function.
- StepFrame = 3 // Step into a new frame or return to previous frame.
};
void PrepareStep(Isolate* isolate, StepAction action);
-void ClearStepping(Isolate* isolate);
+
+bool HasNonBlackboxedFrameOnStack(Isolate* isolate);
/**
* Out-of-memory callback function.
@@ -153,7 +118,7 @@ void SetOutOfMemoryCallback(Isolate* isolate, OutOfMemoryCallback callback,
/**
* Native wrapper around v8::internal::Script object.
*/
-class Script {
+class V8_EXPORT_PRIVATE Script {
public:
v8::Isolate* GetIsolate() const;
@@ -169,21 +134,11 @@ class Script {
MaybeLocal<Value> ContextData() const;
MaybeLocal<String> Source() const;
bool IsWasm() const;
+ bool IsModule() const;
bool GetPossibleBreakpoints(const debug::Location& start,
const debug::Location& end,
std::vector<debug::Location>* locations) const;
- /**
- * script parameter is a wrapper v8::internal::JSObject for
- * v8::internal::Script.
- * This function gets v8::internal::Script from v8::internal::JSObject and
- * wraps it with DebugInterface::Script.
- * Returns empty local if not called with a valid wrapper of
- * v8::internal::Script.
- */
- static MaybeLocal<Script> Wrap(Isolate* isolate,
- v8::Local<v8::Object> script);
-
private:
int GetSourcePosition(const debug::Location& location) const;
};
@@ -196,6 +151,8 @@ class WasmScript : public Script {
int NumFunctions() const;
int NumImportedFunctions() const;
+ std::pair<int, int> GetFunctionRange(int function_index) const;
+
debug::WasmDisassembly DisassembleFunction(int function_index) const;
};
@@ -204,14 +161,103 @@ void GetLoadedScripts(Isolate* isolate, PersistentValueVector<Script>& scripts);
MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* isolate,
Local<String> source);
-typedef std::function<void(debug::PromiseDebugActionType type, int id,
- void* data)>
- AsyncTaskListener;
-void SetAsyncTaskListener(Isolate* isolate, AsyncTaskListener listener,
- void* data);
+class DebugDelegate {
+ public:
+ virtual ~DebugDelegate() {}
+ virtual void PromiseEventOccurred(debug::PromiseDebugActionType type, int id,
+ int parent_id) {}
+ virtual void ScriptCompiled(v8::Local<Script> script,
+ bool has_compile_error) {}
+ virtual void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+ v8::Local<v8::Object> exec_state,
+ v8::Local<v8::Value> break_points_hit) {}
+ virtual void ExceptionThrown(v8::Local<v8::Context> paused_context,
+ v8::Local<v8::Object> exec_state,
+ v8::Local<v8::Value> exception,
+ v8::Local<v8::Value> promise, bool is_uncaught) {
+ }
+ virtual bool IsFunctionBlackboxed(v8::Local<debug::Script> script,
+ const debug::Location& start,
+ const debug::Location& end) {
+ return false;
+ }
+};
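A minimal delegate sketch; only the hooks an embedder cares about need overriding, and everything below is illustrative rather than prescriptive:

  class InspectorDelegate : public v8::debug::DebugDelegate {
   public:
    void ScriptCompiled(v8::Local<v8::debug::Script> script,
                        bool has_compile_error) override {
      // Track newly compiled scripts, e.g. to feed an inspector front-end.
    }
    void BreakProgramRequested(v8::Local<v8::Context> paused_context,
                               v8::Local<v8::Object> exec_state,
                               v8::Local<v8::Value> break_points_hit) override {
      // Pause handling (report hit breakpoints, enter a message loop) here.
    }
  };
  // Installed with: v8::debug::SetDebugDelegate(isolate, &delegate);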
+
+void SetDebugDelegate(Isolate* isolate, DebugDelegate* listener);
+
+void ResetBlackboxedStateCache(Isolate* isolate,
+ v8::Local<debug::Script> script);
int EstimatedValueSize(Isolate* isolate, v8::Local<v8::Value> value);
+v8::MaybeLocal<v8::Array> EntriesPreview(Isolate* isolate,
+ v8::Local<v8::Value> value,
+ bool* is_key_value);
+
+/**
+ * Native wrapper around v8::internal::JSGeneratorObject object.
+ */
+class GeneratorObject {
+ public:
+ v8::MaybeLocal<debug::Script> Script();
+ v8::Local<v8::Function> Function();
+ debug::Location SuspendedLocation();
+ bool IsSuspended();
+
+ static v8::Local<debug::GeneratorObject> Cast(v8::Local<v8::Value> value);
+};
+
+/*
+ * Provide API layer between inspector and code coverage.
+ */
+class V8_EXPORT_PRIVATE Coverage {
+ public:
+ class ScriptData; // Forward declaration.
+
+ class V8_EXPORT_PRIVATE FunctionData {
+ public:
+    // 0-based line and column numbers.
+ Location Start() { return start_; }
+ Location End() { return end_; }
+ uint32_t Count();
+ MaybeLocal<String> Name();
+
+ private:
+ FunctionData(i::CoverageFunction* function, Local<debug::Script> script);
+ i::CoverageFunction* function_;
+ Location start_;
+ Location end_;
+
+ friend class v8::debug::Coverage::ScriptData;
+ };
+
+ class V8_EXPORT_PRIVATE ScriptData {
+ public:
+ Local<debug::Script> GetScript();
+ size_t FunctionCount();
+ FunctionData GetFunctionData(size_t i);
+
+ private:
+ explicit ScriptData(i::CoverageScript* script) : script_(script) {}
+ i::CoverageScript* script_;
+
+ friend class v8::debug::Coverage;
+ };
+
+ static Coverage Collect(Isolate* isolate, bool reset_count);
+
+ static void TogglePrecise(Isolate* isolate, bool enable);
+
+ size_t ScriptCount();
+ ScriptData GetScriptData(size_t i);
+ bool IsEmpty() { return coverage_ == nullptr; }
+
+ ~Coverage();
+
+ private:
+ explicit Coverage(i::Coverage* coverage) : coverage_(coverage) {}
+ i::Coverage* coverage_;
+};
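A hedged consumption sketch mirroring how d8's WriteLcovData drives this API (assumes a live Isolate* named isolate and <cstdio>):

  v8::debug::Coverage coverage = v8::debug::Coverage::Collect(isolate, false);
  for (size_t i = 0; i < coverage.ScriptCount(); i++) {
    v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(i);
    for (size_t j = 0; j < script_data.FunctionCount(); j++) {
      v8::debug::Coverage::FunctionData fn = script_data.GetFunctionData(j);
      // Start()/End() are 0-based source locations; Count() is invocations.
      printf("%d:%d-%d:%d -> %u\n", fn.Start().GetLineNumber(),
             fn.Start().GetColumnNumber(), fn.End().GetLineNumber(),
             fn.End().GetColumnNumber(), fn.Count());
    }
  }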
} // namespace debug
} // namespace v8
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 3434a83088..cf957bc1ec 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -61,9 +61,10 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// inspect the function scope.
// This can only happen if we set a break point inside right before the
// return, which requires a debug info to be available.
+ Handle<DebugInfo> debug_info(shared_info->GetDebugInfo());
// Find the break point where execution has stopped.
- BreakLocation location = BreakLocation::FromFrame(GetFrame());
+ BreakLocation location = BreakLocation::FromFrame(debug_info, GetFrame());
ignore_nested_scopes = location.IsReturn();
}
@@ -86,12 +87,11 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// Reparse the code and analyze the scopes.
// Check whether we are in global, eval or function code.
- Zone zone(isolate->allocator(), ZONE_NAME);
std::unique_ptr<ParseInfo> info;
if (scope_info->scope_type() != FUNCTION_SCOPE) {
// Global or eval code.
Handle<Script> script(Script::cast(shared_info->script()));
- info.reset(new ParseInfo(&zone, script));
+ info.reset(new ParseInfo(script));
if (scope_info->scope_type() == EVAL_SCOPE) {
info->set_eval();
if (!function->context()->IsNativeContext()) {
@@ -107,7 +107,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
}
} else {
// Inner function.
- info.reset(new ParseInfo(&zone, shared_info));
+ info.reset(new ParseInfo(shared_info));
}
if (parsing::ParseAny(info.get()) && Rewriter::Rewrite(info.get())) {
DeclarationScope* scope = info->literal()->scope();
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 314efba870..dd1f5cfe07 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -8,6 +8,7 @@
#include "src/api.h"
#include "src/arguments.h"
+#include "src/assembler-inl.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
@@ -29,6 +30,7 @@
#include "src/messages.h"
#include "src/snapshot/natives.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "include/v8-debug.h"
@@ -37,18 +39,12 @@ namespace internal {
Debug::Debug(Isolate* isolate)
: debug_context_(Handle<Context>()),
- event_listener_(Handle<Object>()),
- event_listener_data_(Handle<Object>()),
- message_handler_(NULL),
- command_received_(0),
- command_queue_(isolate->logger(), kQueueInitialSize),
is_active_(false),
hook_on_function_call_(false),
is_suppressed_(false),
live_edit_enabled_(true), // TODO(yangguo): set to false by default.
break_disabled_(false),
break_points_active_(true),
- in_debug_event_listener_(false),
break_on_exception_(false),
break_on_uncaught_exception_(false),
side_effect_check_failed_(false),
@@ -58,15 +54,12 @@ Debug::Debug(Isolate* isolate)
ThreadInit();
}
-BreakLocation BreakLocation::FromFrame(StandardFrame* frame) {
- // TODO(clemensh): Handle Wasm frames.
- DCHECK(!frame->is_wasm());
-
- auto summary = FrameSummary::GetFirst(frame).AsJavaScript();
+BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
+ JavaScriptFrame* frame) {
+ auto summary = FrameSummary::GetTop(frame).AsJavaScript();
int offset = summary.code_offset();
Handle<AbstractCode> abstract_code = summary.abstract_code();
if (abstract_code->IsCode()) offset = offset - 1;
- Handle<DebugInfo> debug_info(summary.function()->shared()->GetDebugInfo());
auto it = BreakIterator::GetIterator(debug_info, abstract_code);
it->SkipTo(BreakIndexFromCodeOffset(debug_info, abstract_code, offset));
return it->GetBreakLocation();
@@ -75,7 +68,7 @@ BreakLocation BreakLocation::FromFrame(StandardFrame* frame) {
void BreakLocation::AllAtCurrentStatement(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame,
List<BreakLocation>* result_out) {
- auto summary = FrameSummary::GetFirst(frame).AsJavaScript();
+ auto summary = FrameSummary::GetTop(frame).AsJavaScript();
int offset = summary.code_offset();
Handle<AbstractCode> abstract_code = summary.abstract_code();
if (abstract_code->IsCode()) offset = offset - 1;
@@ -121,35 +114,32 @@ bool BreakLocation::HasBreakPoint(Handle<DebugInfo> debug_info) const {
// step to, but not actually a location where we can put a break point.
if (abstract_code_->IsCode()) {
DCHECK_EQ(debug_info->DebugCode(), abstract_code_->GetCode());
- CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ CodeBreakIterator it(debug_info);
it.SkipToPosition(position_, BREAK_POSITION_ALIGNED);
return it.code_offset() == code_offset_;
} else {
DCHECK(abstract_code_->IsBytecodeArray());
- BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ BytecodeArrayBreakIterator it(debug_info);
it.SkipToPosition(position_, BREAK_POSITION_ALIGNED);
return it.code_offset() == code_offset_;
}
}
std::unique_ptr<BreakIterator> BreakIterator::GetIterator(
- Handle<DebugInfo> debug_info, Handle<AbstractCode> abstract_code,
- BreakLocatorType type) {
+ Handle<DebugInfo> debug_info, Handle<AbstractCode> abstract_code) {
if (abstract_code->IsBytecodeArray()) {
DCHECK(debug_info->HasDebugBytecodeArray());
return std::unique_ptr<BreakIterator>(
- new BytecodeArrayBreakIterator(debug_info, type));
+ new BytecodeArrayBreakIterator(debug_info));
} else {
DCHECK(abstract_code->IsCode());
DCHECK(debug_info->HasDebugCode());
- return std::unique_ptr<BreakIterator>(
- new CodeBreakIterator(debug_info, type));
+ return std::unique_ptr<BreakIterator>(new CodeBreakIterator(debug_info));
}
}
-BreakIterator::BreakIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type)
- : debug_info_(debug_info), break_index_(-1), break_locator_type_(type) {
+BreakIterator::BreakIterator(Handle<DebugInfo> debug_info)
+ : debug_info_(debug_info), break_index_(-1) {
position_ = debug_info->shared()->start_position();
statement_position_ = position_;
}
@@ -178,10 +168,9 @@ int BreakIterator::BreakIndexFromPosition(int source_position,
return closest_break;
}
-CodeBreakIterator::CodeBreakIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type)
- : BreakIterator(debug_info, type),
- reloc_iterator_(debug_info->DebugCode(), GetModeMask(type)),
+CodeBreakIterator::CodeBreakIterator(Handle<DebugInfo> debug_info)
+ : BreakIterator(debug_info),
+ reloc_iterator_(debug_info->DebugCode(), GetModeMask()),
source_position_iterator_(
debug_info->DebugCode()->source_position_table()) {
// There is at least one break location.
@@ -189,17 +178,12 @@ CodeBreakIterator::CodeBreakIterator(Handle<DebugInfo> debug_info,
Next();
}
-int CodeBreakIterator::GetModeMask(BreakLocatorType type) {
+int CodeBreakIterator::GetModeMask() {
int mask = 0;
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
- if (isolate()->is_tail_call_elimination_enabled()) {
- mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL);
- }
- if (type == ALL_BREAK_LOCATIONS) {
- mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
- mask |= RelocInfo::ModeMask(RelocInfo::DEBUGGER_STATEMENT);
- }
+ mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL);
+ mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
return mask;
}
@@ -224,8 +208,7 @@ void CodeBreakIterator::Next() {
source_position_iterator_.Advance();
}
- DCHECK(RelocInfo::IsDebugBreakSlot(rmode()) ||
- RelocInfo::IsDebuggerStatement(rmode()));
+ DCHECK(RelocInfo::IsDebugBreakSlot(rmode()));
break_index_++;
}
@@ -238,8 +221,6 @@ DebugBreakType CodeBreakIterator::GetDebugBreakType() {
return isolate()->is_tail_call_elimination_enabled()
? DEBUG_BREAK_SLOT_AT_TAIL_CALL
: DEBUG_BREAK_SLOT_AT_CALL;
- } else if (RelocInfo::IsDebuggerStatement(rmode())) {
- return DEBUGGER_STATEMENT;
} else if (RelocInfo::IsDebugBreakSlot(rmode())) {
return DEBUG_BREAK_SLOT;
} else {
@@ -249,13 +230,12 @@ DebugBreakType CodeBreakIterator::GetDebugBreakType() {
void CodeBreakIterator::SkipToPosition(int position,
BreakPositionAlignment alignment) {
- CodeBreakIterator it(debug_info_, break_locator_type_);
+ CodeBreakIterator it(debug_info_);
SkipTo(it.BreakIndexFromPosition(position, alignment));
}
void CodeBreakIterator::SetDebugBreak() {
DebugBreakType debug_break_type = GetDebugBreakType();
- if (debug_break_type == DEBUGGER_STATEMENT) return;
DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
Builtins* builtins = isolate()->builtins();
Handle<Code> target = debug_break_type == DEBUG_BREAK_SLOT_AT_RETURN
@@ -265,16 +245,12 @@ void CodeBreakIterator::SetDebugBreak() {
}
void CodeBreakIterator::ClearDebugBreak() {
- DebugBreakType debug_break_type = GetDebugBreakType();
- if (debug_break_type == DEBUGGER_STATEMENT) return;
- DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
+ DCHECK(GetDebugBreakType() >= DEBUG_BREAK_SLOT);
DebugCodegen::ClearDebugBreakSlot(isolate(), rinfo()->pc());
}
bool CodeBreakIterator::IsDebugBreak() {
- DebugBreakType debug_break_type = GetDebugBreakType();
- if (debug_break_type == DEBUGGER_STATEMENT) return false;
- DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
+ DCHECK(GetDebugBreakType() >= DEBUG_BREAK_SLOT);
return DebugCodegen::DebugBreakSlotIsPatched(rinfo()->pc());
}
@@ -284,8 +260,8 @@ BreakLocation CodeBreakIterator::GetBreakLocation() {
}
BytecodeArrayBreakIterator::BytecodeArrayBreakIterator(
- Handle<DebugInfo> debug_info, BreakLocatorType type)
- : BreakIterator(debug_info, type),
+ Handle<DebugInfo> debug_info)
+ : BreakIterator(debug_info),
source_position_iterator_(
debug_info->DebugBytecodeArray()->source_position_table()) {
// There is at least one break location.
@@ -309,13 +285,7 @@ void BytecodeArrayBreakIterator::Next() {
DCHECK(statement_position_ >= 0);
DebugBreakType type = GetDebugBreakType();
- if (type == NOT_DEBUG_BREAK) continue;
-
- if (break_locator_type_ == ALL_BREAK_LOCATIONS) break;
-
- DCHECK_EQ(CALLS_AND_RETURNS, break_locator_type_);
- if (type == DEBUG_BREAK_SLOT_AT_CALL) break;
- if (type == DEBUG_BREAK_SLOT_AT_RETURN) break;
+ if (type != NOT_DEBUG_BREAK) break;
}
break_index_++;
}
@@ -333,7 +303,7 @@ DebugBreakType BytecodeArrayBreakIterator::GetDebugBreakType() {
return isolate()->is_tail_call_elimination_enabled()
? DEBUG_BREAK_SLOT_AT_TAIL_CALL
: DEBUG_BREAK_SLOT_AT_CALL;
- } else if (interpreter::Bytecodes::IsCallOrNew(bytecode)) {
+ } else if (interpreter::Bytecodes::IsCallOrConstruct(bytecode)) {
return DEBUG_BREAK_SLOT_AT_CALL;
} else if (source_position_iterator_.is_statement()) {
return DEBUG_BREAK_SLOT;
@@ -344,7 +314,7 @@ DebugBreakType BytecodeArrayBreakIterator::GetDebugBreakType() {
void BytecodeArrayBreakIterator::SkipToPosition(
int position, BreakPositionAlignment alignment) {
- BytecodeArrayBreakIterator it(debug_info_, break_locator_type_);
+ BytecodeArrayBreakIterator it(debug_info_);
SkipTo(it.BreakIndexFromPosition(position, alignment));
}
@@ -404,12 +374,12 @@ void Debug::ThreadInit() {
thread_local_.break_frame_id_ = StackFrame::NO_ID;
thread_local_.last_step_action_ = StepNone;
thread_local_.last_statement_position_ = kNoSourcePosition;
- thread_local_.last_fp_ = 0;
- thread_local_.target_fp_ = 0;
- thread_local_.return_value_ = Handle<Object>();
+ thread_local_.last_frame_count_ = -1;
+ thread_local_.target_frame_count_ = -1;
+ thread_local_.return_value_ = Smi::kZero;
thread_local_.async_task_count_ = 0;
clear_suspended_generator();
- // TODO(isolates): frames_are_dropped_?
+ thread_local_.restart_fp_ = nullptr;
base::NoBarrier_Store(&thread_local_.current_debug_scope_,
static_cast<base::AtomicWord>(0));
UpdateHookOnFunctionCall();
@@ -432,6 +402,7 @@ char* Debug::RestoreDebug(char* storage) {
int Debug::ArchiveSpacePerThread() { return 0; }
void Debug::Iterate(ObjectVisitor* v) {
+ v->VisitPointer(&thread_local_.return_value_);
v->VisitPointer(&thread_local_.suspended_generator_);
}
@@ -491,6 +462,7 @@ bool Debug::Load() {
void Debug::Unload() {
ClearAllBreakPoints();
ClearStepping();
+ RemoveDebugDelegate();
// Return debugger is not loaded.
if (!is_loaded()) return;
@@ -501,8 +473,6 @@ void Debug::Unload() {
}
void Debug::Break(JavaScriptFrame* frame) {
- HandleScope scope(isolate_);
-
// Initialize LiveEdit.
LiveEdit::InitializeThreadLocal(this);
@@ -515,76 +485,59 @@ void Debug::Break(JavaScriptFrame* frame) {
// Postpone interrupt during breakpoint processing.
PostponeInterruptsScope postpone(isolate_);
+ DisableBreak no_recursive_break(this);
- // Return if we fail to retrieve debug info for javascript frames.
- if (frame->is_java_script()) {
- JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
-
- // Get the debug info (create it if it does not exist).
- Handle<JSFunction> function(js_frame->function());
- Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) return;
- }
+ // Return if we fail to retrieve debug info.
+ Handle<JSFunction> function(frame->function());
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared)) return;
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
- BreakLocation location = BreakLocation::FromFrame(frame);
+ // Find the break location where execution has stopped.
+ BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
// Find actual break points, if any, and trigger debug break event.
- MaybeHandle<FixedArray> break_points_hit;
- if (!break_points_active()) {
- // Don't try to find hit breakpoints.
- } else if (frame->is_wasm_interpreter_entry()) {
- // TODO(clemensh): Find hit breakpoints for wasm.
- UNIMPLEMENTED();
- } else {
- // Get the debug info, which must exist if we reach here.
- Handle<DebugInfo> debug_info(
- JavaScriptFrame::cast(frame)->function()->shared()->GetDebugInfo(),
- isolate_);
-
- break_points_hit = CheckBreakPoints(debug_info, &location);
- }
-
+ MaybeHandle<FixedArray> break_points_hit =
+ CheckBreakPoints(debug_info, &location);
if (!break_points_hit.is_null()) {
// Clear all current stepping setup.
ClearStepping();
// Notify the debug event listeners.
Handle<JSArray> jsarr = isolate_->factory()->NewJSArrayWithElements(
break_points_hit.ToHandleChecked());
- OnDebugBreak(jsarr, false);
+ OnDebugBreak(jsarr);
return;
}
// No break point. Check for stepping.
StepAction step_action = last_step_action();
- Address current_fp = frame->UnpaddedFP();
- Address target_fp = thread_local_.target_fp_;
- Address last_fp = thread_local_.last_fp_;
+ int current_frame_count = CurrentFrameCount();
+ int target_frame_count = thread_local_.target_frame_count_;
+ int last_frame_count = thread_local_.last_frame_count_;
bool step_break = false;
switch (step_action) {
case StepNone:
return;
case StepOut:
- // Step out has not reached the target frame yet.
- if (current_fp < target_fp) return;
+ // Step out should not break in a deeper frame than target frame.
+ if (current_frame_count > target_frame_count) return;
step_break = true;
break;
case StepNext:
- // Step next should not break in a deeper frame.
- if (current_fp < target_fp) return;
+ // Step next should not break in a deeper frame than target frame.
+ if (current_frame_count > target_frame_count) return;
// For step-next, a tail call is like a return and should break.
step_break = location.IsTailCall();
// Fall through.
case StepIn: {
- FrameSummary summary = FrameSummary::GetFirst(frame);
- step_break = step_break || location.IsReturn() || current_fp != last_fp ||
+ FrameSummary summary = FrameSummary::GetTop(frame);
+ step_break = step_break || location.IsReturn() ||
+ current_frame_count != last_frame_count ||
thread_local_.last_statement_position_ !=
summary.SourceStatementPosition();
break;
}
- case StepFrame:
- step_break = current_fp != last_fp;
- break;
}
// Clear all current stepping setup.
@@ -592,7 +545,7 @@ void Debug::Break(JavaScriptFrame* frame) {
if (step_break) {
// Notify the debug event listeners.
- OnDebugBreak(isolate_->factory()->undefined_value(), false);
+ OnDebugBreak(isolate_->factory()->undefined_value());
} else {
// Re-prepare to continue.
PrepareStep(step_action);
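The stepping decision above compares logical frame depth rather than raw frame pointers: StepOut and StepNext only break once execution is back at (or above) the recorded target depth, while StepIn additionally breaks on a return, a change of depth, or a new source statement. A minimal stand-alone sketch of that rule, using plain ints in place of V8's frame types (illustrative only, not the V8 API):

// Sketch under simplified assumptions; stand-ins for V8's internals.
enum StepAction { kStepNone, kStepOut, kStepNext, kStepIn };

bool ShouldStepBreak(StepAction action, int current_frame_count,
                     int target_frame_count, int last_frame_count,
                     int last_statement, int current_statement,
                     bool at_return, bool at_tail_call) {
  switch (action) {
    case kStepNone:
      return false;
    case kStepOut:
      // Only break once execution is no deeper than the target frame.
      return current_frame_count <= target_frame_count;
    case kStepNext:
      // Same depth guard; a tail call counts like a return and breaks.
      if (current_frame_count > target_frame_count) return false;
      if (at_tail_call) return true;
      // Otherwise the StepIn conditions apply as well.
      [[fallthrough]];
    case kStepIn:
      // Break on returns, on any change of frame depth, or on a new
      // source statement.
      return at_return || current_frame_count != last_frame_count ||
             last_statement != current_statement;
  }
  return false;
}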
@@ -618,16 +571,16 @@ MaybeHandle<FixedArray> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
+ HandleScope scope(isolate_);
// A break location is considered muted if break locations on the current
// statement have at least one break point, and all of these break points
// evaluate to false. Aside from not triggering a debug break event at the
// break location, we also do not trigger one for debugger statements, nor
// an exception event on exception at this location.
- Object* fun = frame->function();
- if (!fun->IsJSFunction()) return false;
- JSFunction* function = JSFunction::cast(fun);
+ FrameSummary summary = FrameSummary::GetTop(frame);
+ DCHECK(!summary.IsWasm());
+ Handle<JSFunction> function = summary.AsJavaScript().function();
if (!function->shared()->HasDebugInfo()) return false;
- HandleScope scope(isolate_);
Handle<DebugInfo> debug_info(function->shared()->GetDebugInfo());
// Enter the debugger.
DebugScope debug_scope(this);
@@ -693,11 +646,7 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
// Make sure the function is compiled and has set up the debug info.
Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if retrieving debug info failed.
- return true;
- }
-
+ if (!EnsureDebugInfo(shared)) return true;
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
  // Source positions start with zero.
DCHECK(*source_position >= 0);
@@ -722,9 +671,12 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
int* source_position,
BreakPositionAlignment alignment) {
if (script->type() == Script::TYPE_WASM) {
- // TODO(clemensh): set breakpoint for wasm.
- return false;
+ Handle<WasmCompiledModule> compiled_module(
+ WasmCompiledModule::cast(script->wasm_compiled_module()), isolate_);
+ return WasmCompiledModule::SetBreakPoint(compiled_module, source_position,
+ break_point_object);
}
+
HandleScope scope(isolate_);
// Obtain shared function info for the function.
@@ -734,10 +686,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
// Make sure the function has set up the debug info.
Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(result);
- if (!EnsureDebugInfo(shared, Handle<JSFunction>::null())) {
- // Return if retrieving debug info failed.
- return false;
- }
+ if (!EnsureDebugInfo(shared)) return false;
// Find position within function. The script position might be before the
// source position of the first function.
@@ -767,13 +716,13 @@ int Debug::FindBreakablePosition(Handle<DebugInfo> debug_info,
int statement_position;
int position;
if (debug_info->HasDebugCode()) {
- CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ CodeBreakIterator it(debug_info);
it.SkipToPosition(source_position, alignment);
statement_position = it.statement_position();
position = it.position();
} else {
DCHECK(debug_info->HasDebugBytecodeArray());
- BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ BytecodeArrayBreakIterator it(debug_info);
it.SkipToPosition(source_position, alignment);
statement_position = it.statement_position();
position = it.position();
@@ -790,12 +739,12 @@ void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
BreakPointInfo* info = BreakPointInfo::cast(break_points->get(i));
if (info->GetBreakPointCount() == 0) continue;
if (debug_info->HasDebugCode()) {
- CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ CodeBreakIterator it(debug_info);
it.SkipToPosition(info->source_position(), BREAK_POSITION_ALIGNED);
it.SetDebugBreak();
}
if (debug_info->HasDebugBytecodeArray()) {
- BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ BytecodeArrayBreakIterator it(debug_info);
it.SkipToPosition(info->source_position(), BREAK_POSITION_ALIGNED);
it.SetDebugBreak();
}
@@ -805,14 +754,12 @@ void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
void Debug::ClearBreakPoints(Handle<DebugInfo> debug_info) {
DisallowHeapAllocation no_gc;
if (debug_info->HasDebugCode()) {
- for (CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done();
- it.Next()) {
+ for (CodeBreakIterator it(debug_info); !it.Done(); it.Next()) {
it.ClearDebugBreak();
}
}
if (debug_info->HasDebugBytecodeArray()) {
- for (BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
- !it.Done(); it.Next()) {
+ for (BytecodeArrayBreakIterator it(debug_info); !it.Done(); it.Next()) {
it.ClearDebugBreak();
}
}
@@ -853,35 +800,19 @@ void Debug::ClearAllBreakPoints() {
}
}
-void Debug::FloodWithOneShot(Handle<JSFunction> function,
- BreakLocatorType type) {
- // Debug utility functions are not subject to debugging.
- if (function->native_context() == *debug_context()) return;
-
- if (!function->shared()->IsSubjectToDebugging()) {
- // Builtin functions are not subject to stepping, but need to be
- // deoptimized, because optimized code does not check for debug
- // step in at call sites.
- Deoptimizer::DeoptimizeFunction(*function);
- return;
- }
+void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
+ if (!shared->IsSubjectToDebugging() || IsBlackboxed(shared)) return;
// Make sure the function is compiled and has set up the debug info.
- Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if we failed to retrieve the debug info.
- return;
- }
-
- // Flood the function with break points.
+ if (!EnsureDebugInfo(shared)) return;
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ // Flood the function with break points.
if (debug_info->HasDebugCode()) {
- for (CodeBreakIterator it(debug_info, type); !it.Done(); it.Next()) {
+ for (CodeBreakIterator it(debug_info); !it.Done(); it.Next()) {
it.SetDebugBreak();
}
}
if (debug_info->HasDebugBytecodeArray()) {
- for (BytecodeArrayBreakIterator it(debug_info, type); !it.Done();
- it.Next()) {
+ for (BytecodeArrayBreakIterator it(debug_info); !it.Done(); it.Next()) {
it.SetDebugBreak();
}
}
@@ -935,7 +866,7 @@ void Debug::PrepareStepIn(Handle<JSFunction> function) {
if (ignore_events()) return;
if (in_debug_scope()) return;
if (break_disabled()) return;
- FloodWithOneShot(function);
+ FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared(), isolate_));
}
void Debug::PrepareStepInSuspendedGenerator() {
@@ -947,7 +878,7 @@ void Debug::PrepareStepInSuspendedGenerator() {
UpdateHookOnFunctionCall();
Handle<JSFunction> function(
JSGeneratorObject::cast(thread_local_.suspended_generator_)->function());
- FloodWithOneShot(function);
+ FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared(), isolate_));
clear_suspended_generator();
}
@@ -959,31 +890,68 @@ void Debug::PrepareStepOnThrow() {
ClearOneShot();
+ int current_frame_count = CurrentFrameCount();
+
// Iterate through the JavaScript stack looking for handlers.
JavaScriptFrameIterator it(isolate_);
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) break;
+ List<SharedFunctionInfo*> infos;
+ frame->GetFunctions(&infos);
+ current_frame_count -= infos.length();
it.Advance();
}
- if (last_step_action() == StepNext || last_step_action() == StepOut) {
- while (!it.done()) {
- Address current_fp = it.frame()->UnpaddedFP();
- if (current_fp >= thread_local_.target_fp_) break;
- it.Advance();
+ // No handler found. Nothing to instrument.
+ if (it.done()) return;
+
+ bool found_handler = false;
+ // Iterate frames, including inlined frames. First, find the handler frame.
+ // Then skip to the frame we want to break in, then instrument for stepping.
+ for (; !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
+ if (last_step_action() == StepIn) {
+ // Deoptimize frame to ensure calls are checked for step-in.
+ Deoptimizer::DeoptimizeFunction(frame->function());
}
- }
+ List<FrameSummary> summaries;
+ frame->Summarize(&summaries);
+ for (int i = summaries.length() - 1; i >= 0; i--, current_frame_count--) {
+ if (!found_handler) {
+ // We have yet to find the handler. If the frame inlines multiple
+ // functions, we have to check each one for the handler.
+ // If it only contains one function, we already found the handler.
+ if (summaries.length() > 1) {
+ Handle<AbstractCode> code =
+ summaries[i].AsJavaScript().abstract_code();
+ CHECK_EQ(AbstractCode::INTERPRETED_FUNCTION, code->kind());
+ BytecodeArray* bytecode = code->GetBytecodeArray();
+ HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+ int code_offset = summaries[i].code_offset();
+ HandlerTable::CatchPrediction prediction;
+ int index = table->LookupRange(code_offset, nullptr, &prediction);
+ if (index > 0) found_handler = true;
+ } else {
+ found_handler = true;
+ }
+ }
- // Find the closest Javascript frame we can flood with one-shots.
- while (!it.done() &&
- !it.frame()->function()->shared()->IsSubjectToDebugging()) {
- it.Advance();
+ if (found_handler) {
+ // We found the handler. If we are stepping next or out, we need to
+ // iterate until we find the suitable target frame to break in.
+ if ((last_step_action() == StepNext || last_step_action() == StepOut) &&
+ current_frame_count > thread_local_.target_frame_count_) {
+ continue;
+ }
+ Handle<SharedFunctionInfo> info(
+ summaries[i].AsJavaScript().function()->shared());
+ if (!info->IsSubjectToDebugging() || IsBlackboxed(info)) continue;
+ FloodWithOneShot(info);
+ return;
+ }
+ }
}
-
- if (it.done()) return; // No suitable Javascript catch handler.
-
- FloodWithOneShot(Handle<JSFunction>(it.frame()->function()));
}
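PrepareStepOnThrow thus walks the physical frames, expands each into its inlined frame summaries, locates the summary holding the catch handler, and then floods the first function that is subject to debugging while respecting the step-next/step-out target depth. A compact model of that search, with booleans standing in for the handler-table lookup and blackbox checks (assumed simplifications):

// Illustrative model; FrameInfo is a stand-in for a FrameSummary.
#include <vector>

struct FrameInfo {
  bool has_handler;  // stand-in for a HandlerTable::LookupRange hit
  bool debuggable;   // IsSubjectToDebugging() && !IsBlackboxed()
};

// Returns the index of the frame to instrument, or -1 if none qualifies.
// frames[0] is the top of the stack; depth counts logical frames.
int FindFrameToFlood(const std::vector<FrameInfo>& frames,
                     bool honor_target_depth, int target_depth) {
  bool found_handler = false;
  int depth = static_cast<int>(frames.size());
  for (size_t i = 0; i < frames.size(); i++, depth--) {
    if (!found_handler && frames[i].has_handler) found_handler = true;
    if (!found_handler) continue;
    // For StepNext/StepOut, skip frames deeper than the target frame.
    if (honor_target_depth && depth > target_depth) continue;
    if (frames[i].debuggable) return static_cast<int>(i);
  }
  return -1;
}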
@@ -1000,46 +968,47 @@ void Debug::PrepareStep(StepAction step_action) {
// If there is no JavaScript stack don't do anything.
if (frame_id == StackFrame::NO_ID) return;
- JavaScriptFrameIterator frames_it(isolate_, frame_id);
- JavaScriptFrame* frame = frames_it.frame();
-
feature_tracker()->Track(DebugFeatureTracker::kStepping);
thread_local_.last_step_action_ = step_action;
UpdateHookOnFunctionCall();
- // If the function on the top frame is unresolved perform step out. This will
- // be the case when calling unknown function and having the debugger stopped
- // in an unhandled exception.
- if (!frame->function()->IsJSFunction()) {
- // Step out: Find the calling JavaScript frame and flood it with
- // breakpoints.
- frames_it.Advance();
- // Fill the function to return to with one-shot break points.
- JSFunction* function = frames_it.frame()->function();
- FloodWithOneShot(Handle<JSFunction>(function));
+ StackTraceFrameIterator frames_it(isolate_, frame_id);
+ StandardFrame* frame = frames_it.frame();
+
+ // Handle stepping in wasm functions via the wasm interpreter.
+ if (frame->is_wasm()) {
+ // If the top frame is compiled, we cannot step.
+ if (frame->is_wasm_compiled()) return;
+ WasmInterpreterEntryFrame* wasm_frame =
+ WasmInterpreterEntryFrame::cast(frame);
+ wasm_frame->wasm_instance()->debug_info()->PrepareStep(step_action);
return;
}
+ JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
+ DCHECK(js_frame->function()->IsJSFunction());
+
// Get the debug info (create it if it does not exist).
- auto summary = FrameSummary::GetFirst(frame).AsJavaScript();
+ auto summary = FrameSummary::GetTop(frame).AsJavaScript();
Handle<JSFunction> function(summary.function());
Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if ensuring debug info failed.
- return;
- }
+ if (!EnsureDebugInfo(shared)) return;
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- BreakLocation location = BreakLocation::FromFrame(frame);
+ BreakLocation location = BreakLocation::FromFrame(debug_info, js_frame);
// Any step at a return is a step-out.
if (location.IsReturn()) step_action = StepOut;
// A step-next at a tail call is a step-out.
if (location.IsTailCall() && step_action == StepNext) step_action = StepOut;
+ // A step-next in blackboxed function is a step-out.
+ if (step_action == StepNext && IsBlackboxed(shared)) step_action = StepOut;
thread_local_.last_statement_position_ =
summary.abstract_code()->SourceStatementPosition(summary.code_offset());
- thread_local_.last_fp_ = frame->UnpaddedFP();
+ int current_frame_count = CurrentFrameCount();
+ thread_local_.last_frame_count_ = current_frame_count;
// No longer perform the current async step.
clear_suspended_generator();
@@ -1047,38 +1016,45 @@ void Debug::PrepareStep(StepAction step_action) {
case StepNone:
UNREACHABLE();
break;
- case StepOut:
- // Advance to caller frame.
- frames_it.Advance();
- // Skip native and extension functions on the stack.
- while (!frames_it.done() &&
- !frames_it.frame()->function()->shared()->IsSubjectToDebugging()) {
- // Builtin functions are not subject to stepping, but need to be
- // deoptimized to include checks for step-in at call sites.
- Deoptimizer::DeoptimizeFunction(frames_it.frame()->function());
- frames_it.Advance();
- }
- if (!frames_it.done()) {
- // Fill the caller function to return to with one-shot break points.
- Handle<JSFunction> caller_function(frames_it.frame()->function());
- FloodWithOneShot(caller_function);
- thread_local_.target_fp_ = frames_it.frame()->UnpaddedFP();
- }
+ case StepOut: {
// Clear last position info. For stepping out it does not matter.
thread_local_.last_statement_position_ = kNoSourcePosition;
- thread_local_.last_fp_ = 0;
+ thread_local_.last_frame_count_ = -1;
+ // Skip the current frame, find the first frame we want to step out to
+ // and deoptimize every frame along the way.
+ bool in_current_frame = true;
+ for (; !frames_it.done(); frames_it.Advance()) {
+ // TODO(clemensh): Implement stepping out from JS to WASM.
+ if (frames_it.frame()->is_wasm()) continue;
+ JavaScriptFrame* frame = JavaScriptFrame::cast(frames_it.frame());
+ if (last_step_action() == StepIn) {
+ // Deoptimize frame to ensure calls are checked for step-in.
+ Deoptimizer::DeoptimizeFunction(frame->function());
+ }
+ HandleScope scope(isolate_);
+ List<Handle<SharedFunctionInfo>> infos;
+ frame->GetFunctions(&infos);
+ for (; !infos.is_empty(); current_frame_count--) {
+ Handle<SharedFunctionInfo> info = infos.RemoveLast();
+ if (in_current_frame) {
+ // We want to step out, so skip the current frame.
+ in_current_frame = false;
+ continue;
+ }
+ if (!info->IsSubjectToDebugging() || IsBlackboxed(info)) continue;
+ FloodWithOneShot(info);
+ thread_local_.target_frame_count_ = current_frame_count;
+ return;
+ }
+ }
break;
+ }
case StepNext:
- thread_local_.target_fp_ = frame->UnpaddedFP();
- FloodWithOneShot(function);
- break;
+ thread_local_.target_frame_count_ = current_frame_count;
+ // Fall through.
case StepIn:
- FloodWithOneShot(function);
- break;
- case StepFrame:
- // No point in setting one-shot breaks at places where we are not about
- // to leave the current frame.
- FloodWithOneShot(function, CALLS_AND_RETURNS);
+ // TODO(clemensh): Implement stepping from JS into WASM.
+ FloodWithOneShot(shared);
break;
}
}
@@ -1107,13 +1083,13 @@ Handle<Object> Debug::GetSourceBreakLocations(
Smi* position = NULL;
if (position_alignment == STATEMENT_ALIGNED) {
if (debug_info->HasDebugCode()) {
- CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ CodeBreakIterator it(debug_info);
it.SkipToPosition(break_point_info->source_position(),
BREAK_POSITION_ALIGNED);
position = Smi::FromInt(it.statement_position());
} else {
DCHECK(debug_info->HasDebugBytecodeArray());
- BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ BytecodeArrayBreakIterator it(debug_info);
it.SkipToPosition(break_point_info->source_position(),
BREAK_POSITION_ALIGNED);
position = Smi::FromInt(it.statement_position());
@@ -1134,8 +1110,8 @@ void Debug::ClearStepping() {
thread_local_.last_step_action_ = StepNone;
thread_local_.last_statement_position_ = kNoSourcePosition;
- thread_local_.last_fp_ = 0;
- thread_local_.target_fp_ = 0;
+ thread_local_.last_frame_count_ = -1;
+ thread_local_.target_frame_count_ = -1;
UpdateHookOnFunctionCall();
}
@@ -1354,12 +1330,12 @@ void FindBreakablePositions(Handle<DebugInfo> debug_info, int start_position,
int end_position, BreakPositionAlignment alignment,
std::set<int>* positions) {
if (debug_info->HasDebugCode()) {
- CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ CodeBreakIterator it(debug_info);
GetBreakablePositions(&it, start_position, end_position, alignment,
positions);
} else {
DCHECK(debug_info->HasDebugBytecodeArray());
- BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ BytecodeArrayBreakIterator it(debug_info);
GetBreakablePositions(&it, start_position, end_position, alignment,
positions);
}
@@ -1394,8 +1370,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
was_compiled = true;
}
}
- if (!EnsureDebugInfo(candidates[i], Handle<JSFunction>::null()))
- return false;
+ if (!EnsureDebugInfo(candidates[i])) return false;
}
if (was_compiled) continue;
@@ -1403,7 +1378,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
CHECK(candidates[i]->HasDebugInfo());
Handle<DebugInfo> debug_info(candidates[i]->GetDebugInfo());
FindBreakablePositions(debug_info, start_position, end_position,
- STATEMENT_ALIGNED, positions);
+ BREAK_POSITION_ALIGNED, positions);
}
return true;
}
@@ -1416,9 +1391,7 @@ void Debug::RecordGenerator(Handle<JSGeneratorObject> generator_object) {
if (last_step_action() == StepNext) {
// Only consider this generator a step-next target if not stepping in.
- JavaScriptFrameIterator stack_iterator(isolate_);
- JavaScriptFrame* frame = stack_iterator.frame();
- if (frame->UnpaddedFP() < thread_local_.target_fp_) return;
+ if (thread_local_.target_frame_count_ < CurrentFrameCount()) return;
}
DCHECK(!has_suspended_generator());
@@ -1526,16 +1499,11 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// Ensures the debug information is present for shared.
-bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> function) {
- if (!shared->IsSubjectToDebugging()) return false;
-
+bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
// Return if we already have the debug info for shared.
if (shared->HasDebugInfo()) return true;
-
- if (function.is_null()) {
- DCHECK(shared->HasDebugCode());
- } else if (!Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
+ if (!shared->IsSubjectToDebugging()) return false;
+ if (!shared->is_compiled() && !Compiler::CompileDebugCode(shared)) {
return false;
}
@@ -1575,8 +1543,8 @@ void Debug::RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info) {
} else {
prev->set_next(current->next());
}
+ shared->set_debug_info(Smi::FromInt(debug_info->debugger_hints()));
delete current;
- shared->set_debug_info(DebugInfo::uninitialized());
return;
}
// Move to next in list.
@@ -1587,31 +1555,40 @@ void Debug::RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info) {
UNREACHABLE();
}
-void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
- after_break_target_ = NULL;
- if (!LiveEdit::SetAfterBreakTarget(this)) {
- // Continue just after the slot.
- after_break_target_ = frame->pc();
- }
-}
-
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
HandleScope scope(isolate_);
+ // Get the executing function in which the debug break occurred.
+ Handle<SharedFunctionInfo> shared(frame->function()->shared());
+
// With no debug info there are no break points, so we can't be at a return.
- if (!frame->function()->shared()->HasDebugInfo()) return false;
+ if (!shared->HasDebugInfo()) return false;
DCHECK(!frame->is_optimized());
- BreakLocation location = BreakLocation::FromFrame(frame);
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
return location.IsReturn() || location.IsTailCall();
}
-void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- LiveEditFrameDropMode mode) {
- if (mode != LIVE_EDIT_CURRENTLY_SET_MODE) {
- thread_local_.frame_drop_mode_ = mode;
+void Debug::ScheduleFrameRestart(StackFrame* frame) {
+ // Set a target FP for the FrameDropperTrampoline builtin to drop to once
+ // we return from the debugger.
+ DCHECK(frame->is_java_script());
+ // Only reschedule to a frame further below a frame we already scheduled for.
+ if (frame->fp() <= thread_local_.restart_fp_) return;
+ // If the frame is optimized, trigger a deopt and jump into the
+ // FrameDropperTrampoline in the deoptimizer.
+ thread_local_.restart_fp_ = frame->fp();
+
+ // Reset break frame ID to the frame below the restarted frame.
+ thread_local_.break_frame_id_ = StackFrame::NO_ID;
+ for (StackTraceFrameIterator it(isolate_); !it.done(); it.Advance()) {
+ if (it.frame()->fp() > thread_local_.restart_fp_) {
+ thread_local_.break_frame_id_ = it.frame()->id();
+ return;
+ }
}
- thread_local_.break_frame_id_ = new_break_frame_id;
}
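After recording restart_fp_, the break frame id is re-pointed at the first frame older than the restarted one; stacks grow downwards, so older frames have higher frame-pointer values. A sketch of that selection over a list of (fp, id) pairs (stand-in types, not V8's StackFrame):

#include <cstdint>
#include <vector>

struct FrameRec { uintptr_t fp; int id; };  // stand-in for StackFrame data
constexpr int kNoId = -1;

// Pick the break frame: the first frame below the restarted frame on the
// stack, i.e. the first one with a frame pointer above restart_fp.
int BreakFrameIdAfterRestart(const std::vector<FrameRec>& frames,  // top first
                             uintptr_t restart_fp) {
  for (const FrameRec& f : frames) {
    if (f.fp > restart_fp) return f.id;
  }
  return kNoId;
}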
@@ -1620,13 +1597,6 @@ bool Debug::IsDebugGlobal(JSGlobalObject* global) {
}
-void Debug::ClearMirrorCache() {
- PostponeInterruptsScope postpone(isolate_);
- HandleScope scope(isolate_);
- CallFunction("ClearMirrorCache", 0, NULL);
-}
-
-
Handle<FixedArray> Debug::GetLoadedScripts() {
isolate_->heap()->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
GarbageCollectionReason::kDebugger);
@@ -1686,18 +1656,17 @@ MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script,
return CallFunction("MakeCompileEvent", arraysize(argv), argv);
}
-MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<Smi> type,
- Handle<Smi> id) {
- DCHECK(id->IsNumber());
+MaybeHandle<Object> Debug::MakeAsyncTaskEvent(
+ v8::debug::PromiseDebugActionType type, int id) {
// Create the async task event object.
- Handle<Object> argv[] = {type, id};
+ Handle<Object> argv[] = {Handle<Smi>(Smi::FromInt(type), isolate_),
+ Handle<Smi>(Smi::FromInt(id), isolate_)};
return CallFunction("MakeAsyncTaskEvent", arraysize(argv), argv);
}
void Debug::OnThrow(Handle<Object> exception) {
if (in_debug_scope() || ignore_events()) return;
- PrepareStepOnThrow();
// Temporarily clear any scheduled_exception to allow evaluating
// JavaScript from the debug event handler.
HandleScope scope(isolate_);
@@ -1710,6 +1679,7 @@ void Debug::OnThrow(Handle<Object> exception) {
if (!scheduled_exception.is_null()) {
isolate_->thread_local_top()->scheduled_exception_ = *scheduled_exception;
}
+ PrepareStepOnThrow();
}
void Debug::OnPromiseReject(Handle<Object> promise, Handle<Object> value) {
@@ -1724,6 +1694,44 @@ void Debug::OnPromiseReject(Handle<Object> promise, Handle<Object> value) {
}
}
+namespace {
+v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
+ Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
+ // Isolate::context() may have been NULL when "script collected" event
+ // occurred.
+ if (context.is_null()) return v8::Local<v8::Context>();
+ Handle<Context> native_context(context->native_context());
+ return v8::Utils::ToLocal(native_context);
+}
+} // anonymous namespace
+
+bool Debug::IsExceptionBlackboxed(bool uncaught) {
+ JavaScriptFrameIterator it(isolate_);
+ if (it.done()) return false;
+ // An uncaught exception is blackboxed if all current frames are
+ // blackboxed; a caught exception is blackboxed if the top frame is
+ // blackboxed.
+ bool is_top_frame_blackboxed = IsFrameBlackboxed(it.frame());
+ if (!uncaught || !is_top_frame_blackboxed) return is_top_frame_blackboxed;
+ it.Advance();
+ while (!it.done()) {
+ if (!IsFrameBlackboxed(it.frame())) return false;
+ it.Advance();
+ }
+ return true;
+}
+
+bool Debug::IsFrameBlackboxed(JavaScriptFrame* frame) {
+ HandleScope scope(isolate_);
+ if (!frame->HasInlinedFrames()) {
+ Handle<SharedFunctionInfo> shared(frame->function()->shared(), isolate_);
+ return IsBlackboxed(shared);
+ }
+ List<Handle<SharedFunctionInfo>> infos;
+ frame->GetFunctions(&infos);
+ for (const auto& info : infos)
+ if (!IsBlackboxed(info)) return false;
+ return true;
+}
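So an uncaught exception is reported only if at least one frame on the stack is not blackboxed, while a caught exception is suppressed as soon as the top frame is blackboxed. The same rule over a simple vector of per-frame flags (a model, not the V8 API):

#include <vector>

// frame_blackboxed[0] corresponds to the top JavaScript frame.
bool IsExceptionBlackboxedModel(const std::vector<bool>& frame_blackboxed,
                                bool uncaught) {
  if (frame_blackboxed.empty()) return false;
  // Caught exceptions: the top frame alone decides.
  if (!uncaught) return frame_blackboxed.front();
  // Uncaught exceptions: blackboxed only if every frame is blackboxed.
  for (bool blackboxed : frame_blackboxed) {
    if (!blackboxed) return false;
  }
  return true;
}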
void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
// We cannot generate debug events when JS execution is disallowed.
@@ -1745,6 +1753,9 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
// Check whether the promise reject is considered an uncaught exception.
uncaught = !isolate_->PromiseHasUserDefinedRejectHandler(jspromise);
}
+
+ if (!debug_delegate_) return;
+
// Bail out if exception breaks are not active
if (uncaught) {
// Uncaught exceptions are reported by either flags.
@@ -1755,28 +1766,33 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
}
{
- // Check whether the break location is muted.
JavaScriptFrameIterator it(isolate_);
- if (!it.done() && IsMutedAtCurrentLocation(it.frame())) return;
+ // Check whether the top frame is blackboxed or the break location is muted.
+ if (!it.done() && (IsMutedAtCurrentLocation(it.frame()) ||
+ IsExceptionBlackboxed(uncaught))) {
+ return;
+ }
+ if (it.done()) return; // Do not trigger an event with an empty stack.
}
DebugScope debug_scope(this);
if (debug_scope.failed()) return;
+ HandleScope scope(isolate_);
+ PostponeInterruptsScope postpone(isolate_);
+ DisableBreak no_recursive_break(this);
- // Create the event data object.
- Handle<Object> event_data;
+ // Create the execution state.
+ Handle<Object> exec_state;
// Bail out and don't call debugger if exception.
- if (!MakeExceptionEvent(
- exception, uncaught, promise).ToHandle(&event_data)) {
- return;
- }
+ if (!MakeExecutionState().ToHandle(&exec_state)) return;
- // Process debug event.
- ProcessDebugEvent(v8::Exception, Handle<JSObject>::cast(event_data), false);
- // Return to continue execution from where the exception was thrown.
+ debug_delegate_->ExceptionThrown(
+ GetDebugEventContext(isolate_),
+ v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
+ v8::Utils::ToLocal(exception), v8::Utils::ToLocal(promise), uncaught);
}
-void Debug::OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue) {
+void Debug::OnDebugBreak(Handle<Object> break_points_hit) {
// The caller provided for DebugScope.
AssertDebugContext();
// Bail out if there is no listener for this event
@@ -1786,15 +1802,20 @@ void Debug::OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue) {
PrintBreakLocation();
#endif // DEBUG
+ if (!debug_delegate_) return;
HandleScope scope(isolate_);
- // Create the event data object.
- Handle<Object> event_data;
+ PostponeInterruptsScope no_interrupts(isolate_);
+ DisableBreak no_recursive_break(this);
+
+ // Create the execution state.
+ Handle<Object> exec_state;
// Bail out and don't call debugger if exception.
- if (!MakeBreakEvent(break_points_hit).ToHandle(&event_data)) return;
+ if (!MakeExecutionState().ToHandle(&exec_state)) return;
- // Process debug event.
- ProcessDebugEvent(v8::Break, Handle<JSObject>::cast(event_data),
- auto_continue);
+ debug_delegate_->BreakProgramRequested(
+ GetDebugEventContext(isolate_),
+ v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
+ v8::Utils::ToLocal(break_points_hit));
}
@@ -1825,7 +1846,7 @@ void SendAsyncTaskEventCancel(const v8::WeakCallbackInfo<void>& info) {
reinterpret_cast<CollectedCallbackData*>(info.GetParameter()));
if (!data->debug->is_active()) return;
HandleScope scope(data->isolate);
- data->debug->OnAsyncTaskEvent(debug::kDebugPromiseCollected, data->id);
+ data->debug->OnAsyncTaskEvent(debug::kDebugPromiseCollected, data->id, 0);
}
void ResetPromiseHandle(const v8::WeakCallbackInfo<void>& info) {
@@ -1834,8 +1855,64 @@ void ResetPromiseHandle(const v8::WeakCallbackInfo<void>& info) {
GlobalHandles::Destroy(data->location);
info.SetSecondPassCallback(&SendAsyncTaskEventCancel);
}
+
+// In an async function, reuse the existing stack related to the outer
+// Promise. Otherwise, e.g. in a direct call to then(), save a new stack.
+// Promises with multiple reactions, where one or more of them is an async
+// function, will not get a good stack trace, since async functions require
+// different stacks than direct Promise use, but we save and restore a
+// stack only once for all reactions.
+//
+// If this isn't the async function case, we allocate a fresh task id;
+// otherwise we reuse the async stack id stored on the outer promise.
+//
+// TODO(littledan): Improve this case.
+int GetReferenceAsyncTaskId(Isolate* isolate, Handle<JSPromise> promise) {
+ Handle<Symbol> handled_by_symbol =
+ isolate->factory()->promise_handled_by_symbol();
+ Handle<Object> handled_by_promise =
+ JSObject::GetDataProperty(promise, handled_by_symbol);
+ if (!handled_by_promise->IsJSPromise()) {
+ return isolate->debug()->NextAsyncTaskId(promise);
+ }
+ Handle<JSPromise> handled_by_promise_js =
+ Handle<JSPromise>::cast(handled_by_promise);
+ Handle<Symbol> async_stack_id_symbol =
+ isolate->factory()->promise_async_stack_id_symbol();
+ Handle<Object> async_task_id =
+ JSObject::GetDataProperty(handled_by_promise_js, async_stack_id_symbol);
+ if (!async_task_id->IsSmi()) {
+ return isolate->debug()->NextAsyncTaskId(promise);
+ }
+ return Handle<Smi>::cast(async_task_id)->value();
+}
} // namespace
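In other words, a promise marked as handled by an outer promise that already carries an async stack id inherits that id, so the async function's whole reaction chain shares one saved stack; everything else gets a fresh id from NextAsyncTaskId. A reduced model of that decision, with std::optional standing in for the symbol-keyed properties (assumed simplification):

#include <optional>

struct PromiseModel {
  // Stand-in for the promise_handled_by / promise_async_stack_id symbols.
  std::optional<int> outer_async_stack_id;
};

int ReferenceAsyncTaskIdModel(const PromiseModel& promise, int& next_task_id) {
  // Reuse the outer async function's stack id when one exists.
  if (promise.outer_async_stack_id) return *promise.outer_async_stack_id;
  // Otherwise allocate a fresh id, like Debug::NextAsyncTaskId.
  return next_task_id++;
}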
+void Debug::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+ Handle<Object> parent) {
+ if (!debug_delegate_) return;
+ int id = GetReferenceAsyncTaskId(isolate_, promise);
+ switch (type) {
+ case PromiseHookType::kInit:
+ OnAsyncTaskEvent(debug::kDebugPromiseCreated, id,
+ parent->IsJSPromise()
+ ? GetReferenceAsyncTaskId(
+ isolate_, Handle<JSPromise>::cast(parent))
+ : 0);
+ return;
+ case PromiseHookType::kResolve:
+ // We can't use this hook because it's called before the promise object
+ // gets its resolved status.
+ return;
+ case PromiseHookType::kBefore:
+ OnAsyncTaskEvent(debug::kDebugWillHandle, id, 0);
+ return;
+ case PromiseHookType::kAfter:
+ OnAsyncTaskEvent(debug::kDebugDidHandle, id, 0);
+ return;
+ }
+}
+
int Debug::NextAsyncTaskId(Handle<JSObject> promise) {
LookupIterator it(promise, isolate_->factory()->promise_async_id_symbol());
Maybe<bool> maybe = JSReceiver::HasProperty(&it);
@@ -1863,140 +1940,66 @@ int Debug::NextAsyncTaskId(Handle<JSObject> promise) {
return async_id->value();
}
-void Debug::SetAsyncTaskListener(debug::AsyncTaskListener listener,
- void* data) {
- async_task_listener_ = listener;
- async_task_listener_data_ = data;
- UpdateState();
+namespace {
+debug::Location GetDebugLocation(Handle<Script> script, int source_position) {
+ Script::PositionInfo info;
+ Script::GetPositionInfo(script, source_position, &info, Script::WITH_OFFSET);
+ return debug::Location(info.line, info.column);
}
+} // namespace
-void Debug::OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id) {
- if (in_debug_scope() || ignore_events()) return;
-
- if (async_task_listener_) {
- async_task_listener_(type, id, async_task_listener_data_);
- // There are three types of event listeners: C++ message_handler,
- // JavaScript event listener and C++ event listener.
- // Currently inspector still uses C++ event listener and installs
- // more specific event listeners for part of events. Calling of
- // C++ event listener is redundant when more specific event listener
- // is presented. Other clients can install JavaScript event listener
- // (e.g. some of NodeJS module).
- bool non_inspector_listener_exists =
- message_handler_ != nullptr ||
- (event_listener_.is_null() && !event_listener_->IsForeign());
- if (!non_inspector_listener_exists) return;
+bool Debug::IsBlackboxed(Handle<SharedFunctionInfo> shared) {
+ if (!debug_delegate_) return false;
+ if (!shared->computed_debug_is_blackboxed()) {
+ bool is_blackboxed = false;
+ if (shared->script()->IsScript()) {
+ SuppressDebug while_processing(this);
+ HandleScope handle_scope(isolate_);
+ PostponeInterruptsScope no_interrupts(isolate_);
+ DisableBreak no_recursive_break(this);
+ Handle<Script> script(Script::cast(shared->script()));
+ if (script->type() == i::Script::TYPE_NORMAL) {
+ debug::Location start =
+ GetDebugLocation(script, shared->start_position());
+ debug::Location end = GetDebugLocation(script, shared->end_position());
+ is_blackboxed = debug_delegate_->IsFunctionBlackboxed(
+ ToApiHandle<debug::Script>(script), start, end);
+ }
+ }
+ shared->set_debug_is_blackboxed(is_blackboxed);
+ shared->set_computed_debug_is_blackboxed(true);
}
-
- HandleScope scope(isolate_);
- DebugScope debug_scope(this);
- if (debug_scope.failed()) return;
-
- // Create the script collected state object.
- Handle<Object> event_data;
- // Bail out and don't call debugger if exception.
- if (!MakeAsyncTaskEvent(handle(Smi::FromInt(type), isolate_),
- handle(Smi::FromInt(id), isolate_))
- .ToHandle(&event_data))
- return;
-
- // Process debug event.
- ProcessDebugEvent(v8::AsyncTaskEvent, Handle<JSObject>::cast(event_data),
- true);
+ return shared->debug_is_blackboxed();
}
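Note that the delegate is consulted at most once per function: the answer is cached on the SharedFunctionInfo behind a computed flag, so repeated stepping does not re-enter the embedder. The memoization shape, sketched with a callable in place of the delegate (illustrative names only):

#include <functional>

struct SharedInfoModel {
  bool computed_blackboxed = false;  // mirrors computed_debug_is_blackboxed
  bool blackboxed = false;           // mirrors debug_is_blackboxed
};

bool IsBlackboxedCached(SharedInfoModel& info,
                        const std::function<bool()>& ask_delegate) {
  if (!info.computed_blackboxed) {
    info.blackboxed = ask_delegate();  // single embedder round-trip
    info.computed_blackboxed = true;
  }
  return info.blackboxed;
}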
-void Debug::ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
- bool auto_continue) {
+void Debug::OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id,
+ int parent_id) {
+ if (in_debug_scope() || ignore_events()) return;
+ if (!debug_delegate_) return;
+ SuppressDebug while_processing(this);
+ DebugScope debug_scope(isolate_->debug());
+ if (debug_scope.failed()) return;
HandleScope scope(isolate_);
-
- // Create the execution state.
- Handle<Object> exec_state;
- // Bail out and don't call debugger if exception.
- if (!MakeExecutionState().ToHandle(&exec_state)) return;
-
- // First notify the message handler if any.
- if (message_handler_ != NULL) {
- NotifyMessageHandler(event, Handle<JSObject>::cast(exec_state), event_data,
- auto_continue);
- }
- // Notify registered debug event listener. This can be either a C or
- // a JavaScript function. Don't call event listener for v8::Break
- // here, if it's only a debug command -- they will be processed later.
- if ((event != v8::Break || !auto_continue) && !event_listener_.is_null()) {
- CallEventCallback(event, exec_state, event_data, NULL);
- }
-}
-
-
-void Debug::CallEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data) {
- // Prevent other interrupts from triggering, for example API callbacks,
- // while dispatching event listners.
- PostponeInterruptsScope postpone(isolate_);
- bool previous = in_debug_event_listener_;
- in_debug_event_listener_ = true;
- if (event_listener_->IsForeign()) {
- // Invoke the C debug event listener.
- debug::EventCallback callback = FUNCTION_CAST<debug::EventCallback>(
- Handle<Foreign>::cast(event_listener_)->foreign_address());
- EventDetailsImpl event_details(event,
- Handle<JSObject>::cast(exec_state),
- Handle<JSObject>::cast(event_data),
- event_listener_data_,
- client_data);
- callback(event_details);
- CHECK(!isolate_->has_scheduled_exception());
- } else {
- // Invoke the JavaScript debug event listener.
- DCHECK(event_listener_->IsJSFunction());
- Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_),
- exec_state,
- event_data,
- event_listener_data_ };
- Handle<JSReceiver> global = isolate_->global_proxy();
- MaybeHandle<Object> result =
- Execution::Call(isolate_, Handle<JSFunction>::cast(event_listener_),
- global, arraysize(argv), argv);
- CHECK(!result.is_null()); // Listeners must not throw.
- }
- in_debug_event_listener_ = previous;
+ PostponeInterruptsScope no_interrupts(isolate_);
+ DisableBreak no_recursive_break(this);
+ debug_delegate_->PromiseEventOccurred(type, id, parent_id);
}
-
void Debug::ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script) {
if (ignore_events()) return;
if (script->type() != i::Script::TYPE_NORMAL &&
script->type() != i::Script::TYPE_WASM) {
return;
}
+ if (!debug_delegate_) return;
SuppressDebug while_processing(this);
-
- bool in_nested_debug_scope = in_debug_scope();
- HandleScope scope(isolate_);
DebugScope debug_scope(this);
if (debug_scope.failed()) return;
-
- // Create the compile state object.
- Handle<Object> event_data;
- // Bail out and don't call debugger if exception.
- if (!MakeCompileEvent(script, event).ToHandle(&event_data)) return;
-
- // Don't call NotifyMessageHandler if already in debug scope to avoid running
- // nested command loop.
- if (in_nested_debug_scope) {
- if (event_listener_.is_null()) return;
- // Create the execution state.
- Handle<Object> exec_state;
- // Bail out and don't call debugger if exception.
- if (!MakeExecutionState().ToHandle(&exec_state)) return;
-
- CallEventCallback(event, exec_state, event_data, NULL);
- } else {
- // Process debug event.
- ProcessDebugEvent(event, Handle<JSObject>::cast(event_data), true);
- }
+ HandleScope scope(isolate_);
+ PostponeInterruptsScope postpone(isolate_);
+ DisableBreak no_recursive_break(this);
+ debug_delegate_->ScriptCompiled(ToApiHandle<debug::Script>(script),
+ event != v8::AfterCompile);
}
@@ -2008,170 +2011,46 @@ Handle<Context> Debug::GetDebugContext() {
return handle(*debug_context(), isolate_);
}
-void Debug::NotifyMessageHandler(v8::DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- bool auto_continue) {
- // Prevent other interrupts from triggering, for example API callbacks,
- // while dispatching message handler callbacks.
- PostponeInterruptsScope no_interrupts(isolate_);
- DCHECK(is_active_);
- HandleScope scope(isolate_);
- // Process the individual events.
- bool sendEventMessage = false;
- switch (event) {
- case v8::Break:
- sendEventMessage = !auto_continue;
- break;
- case v8::CompileError:
- case v8::AsyncTaskEvent:
- break;
- case v8::Exception:
- case v8::AfterCompile:
- sendEventMessage = true;
- break;
+int Debug::CurrentFrameCount() {
+ StackTraceFrameIterator it(isolate_);
+ if (break_frame_id() != StackFrame::NO_ID) {
+ // Skip to break frame.
+ DCHECK(in_debug_scope());
+ while (!it.done() && it.frame()->id() != break_frame_id()) it.Advance();
}
-
- // The debug command interrupt flag might have been set when the command was
- // added. It should be enough to clear the flag only once while we are in the
- // debugger.
- DCHECK(in_debug_scope());
- isolate_->stack_guard()->ClearDebugCommand();
-
- // Notify the debugger that a debug event has occurred unless auto continue is
- // active in which case no event is send.
- if (sendEventMessage) {
- MessageImpl message = MessageImpl::NewEvent(
- event, auto_continue, Handle<JSObject>::cast(exec_state),
- Handle<JSObject>::cast(event_data));
- InvokeMessageHandler(message);
- }
-
- // If auto continue don't make the event cause a break, but process messages
- // in the queue if any. For script collected events don't even process
- // messages in the queue as the execution state might not be what is expected
- // by the client.
- if (auto_continue && !has_commands()) return;
-
- // DebugCommandProcessor goes here.
- bool running = auto_continue;
-
- Handle<Object> cmd_processor_ctor =
- JSReceiver::GetProperty(isolate_, exec_state, "debugCommandProcessor")
- .ToHandleChecked();
- Handle<Object> ctor_args[] = {isolate_->factory()->ToBoolean(running)};
- Handle<JSReceiver> cmd_processor = Handle<JSReceiver>::cast(
- Execution::Call(isolate_, cmd_processor_ctor, exec_state, 1, ctor_args)
- .ToHandleChecked());
- Handle<JSFunction> process_debug_request = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(isolate_, cmd_processor, "processDebugRequest")
- .ToHandleChecked());
- Handle<Object> is_running =
- JSReceiver::GetProperty(isolate_, cmd_processor, "isRunning")
- .ToHandleChecked();
-
- // Process requests from the debugger.
- do {
- // Wait for new command in the queue.
- command_received_.Wait();
-
- // Get the command from the queue.
- CommandMessage command = command_queue_.Get();
- isolate_->logger()->DebugTag(
- "Got request from command queue, in interactive loop.");
- if (!is_active()) {
- // Delete command text and user data.
- command.Dispose();
- return;
- }
-
- Vector<const uc16> command_text(
- const_cast<const uc16*>(command.text().start()),
- command.text().length());
- Handle<String> request_text = isolate_->factory()
- ->NewStringFromTwoByte(command_text)
- .ToHandleChecked();
- Handle<Object> request_args[] = {request_text};
- Handle<Object> answer_value;
- Handle<String> answer;
- MaybeHandle<Object> maybe_exception;
- MaybeHandle<Object> maybe_result = Execution::TryCall(
- isolate_, process_debug_request, cmd_processor, 1, request_args,
- Execution::MessageHandling::kReport, &maybe_exception);
-
- if (maybe_result.ToHandle(&answer_value)) {
- if (answer_value->IsUndefined(isolate_)) {
- answer = isolate_->factory()->empty_string();
- } else {
- answer = Handle<String>::cast(answer_value);
- }
-
- // Log the JSON request/response.
- if (FLAG_trace_debug_json) {
- PrintF("%s\n", request_text->ToCString().get());
- PrintF("%s\n", answer->ToCString().get());
- }
-
- Handle<Object> is_running_args[] = {answer};
- maybe_result = Execution::Call(isolate_, is_running, cmd_processor, 1,
- is_running_args);
- Handle<Object> result;
- if (!maybe_result.ToHandle(&result)) break;
- running = result->IsTrue(isolate_);
+ int counter = 0;
+ while (!it.done()) {
+ if (it.frame()->is_optimized()) {
+ List<SharedFunctionInfo*> infos;
+ OptimizedFrame::cast(it.frame())->GetFunctions(&infos);
+ counter += infos.length();
} else {
- Handle<Object> exception;
- if (!maybe_exception.ToHandle(&exception)) break;
- Handle<Object> result;
- if (!Object::ToString(isolate_, exception).ToHandle(&result)) break;
- answer = Handle<String>::cast(result);
+ counter++;
}
-
- // Return the result.
- MessageImpl message = MessageImpl::NewResponse(
- event, running, exec_state, event_data, answer, command.client_data());
- InvokeMessageHandler(message);
- command.Dispose();
-
- // Return from debug event processing if either the VM is put into the
- // running state (through a continue command) or auto continue is active
- // and there are no more commands queued.
- } while (!running || has_commands());
- command_queue_.Clear();
-}
-
-void Debug::SetEventListener(Handle<Object> callback,
- Handle<Object> data) {
- GlobalHandles* global_handles = isolate_->global_handles();
-
- // Remove existing entry.
- GlobalHandles::Destroy(event_listener_.location());
- event_listener_ = Handle<Object>();
- GlobalHandles::Destroy(event_listener_data_.location());
- event_listener_data_ = Handle<Object>();
-
- // Set new entry.
- if (!callback->IsNullOrUndefined(isolate_)) {
- event_listener_ = global_handles->Create(*callback);
- if (data.is_null()) data = isolate_->factory()->undefined_value();
- event_listener_data_ = global_handles->Create(*data);
+ it.Advance();
}
+ return counter;
+}
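CurrentFrameCount therefore measures stack depth in logical frames: an optimized physical frame contributes one count per inlined function, which is what makes the frame-count comparisons in the stepping code robust against inlining. A stand-alone equivalent over a toy frame list (assumed types, not V8's iterators):

#include <vector>

struct PhysicalFrame {
  bool optimized;
  int inlined_functions;  // >= 1; only consulted for optimized frames
};

int LogicalFrameCount(const std::vector<PhysicalFrame>& stack) {
  int counter = 0;
  for (const PhysicalFrame& frame : stack) {
    // Optimized frames may inline several functions; each counts once.
    counter += frame.optimized ? frame.inlined_functions : 1;
  }
  return counter;
}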
+void Debug::SetDebugDelegate(debug::DebugDelegate* delegate,
+ bool pass_ownership) {
+ RemoveDebugDelegate();
+ debug_delegate_ = delegate;
+ owns_debug_delegate_ = pass_ownership;
UpdateState();
}
-void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
- message_handler_ = handler;
- UpdateState();
- if (handler == NULL && in_debug_scope()) {
- // Send an empty command to the debugger if in a break to make JavaScript
- // run again if the debugger is closed.
- EnqueueCommandMessage(Vector<const uint16_t>::empty());
+void Debug::RemoveDebugDelegate() {
+ if (debug_delegate_ == nullptr) return;
+ if (owns_debug_delegate_) {
+ owns_debug_delegate_ = false;
+ delete debug_delegate_;
}
+ debug_delegate_ = nullptr;
}
void Debug::UpdateState() {
- bool is_active = message_handler_ != nullptr || !event_listener_.is_null() ||
- async_task_listener_ != nullptr;
+ bool is_active = debug_delegate_ != nullptr;
if (is_active || in_debug_scope()) {
// Note that the debug context could have already been loaded to
// bootstrap test cases.
@@ -2182,40 +2061,15 @@ void Debug::UpdateState() {
Unload();
}
is_active_ = is_active;
+ isolate_->DebugStateUpdated();
}
void Debug::UpdateHookOnFunctionCall() {
- STATIC_ASSERT(StepFrame > StepIn);
- STATIC_ASSERT(LastStepAction == StepFrame);
- hook_on_function_call_ = thread_local_.last_step_action_ >= StepIn ||
+ STATIC_ASSERT(LastStepAction == StepIn);
+ hook_on_function_call_ = thread_local_.last_step_action_ == StepIn ||
isolate_->needs_side_effect_check();
}
-// Calls the registered debug message handler. This callback is part of the
-// public API.
-void Debug::InvokeMessageHandler(MessageImpl message) {
- if (message_handler_ != NULL) message_handler_(message);
-}
-
-// Puts a command coming from the public API on the queue. Creates
-// a copy of the command string managed by the debugger. Up to this
-// point, the command data was managed by the API client. Called
-// by the API client thread.
-void Debug::EnqueueCommandMessage(Vector<const uint16_t> command,
- v8::Debug::ClientData* client_data) {
- // Need to cast away const.
- CommandMessage message = CommandMessage::New(
- Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
- command.length()),
- client_data);
- isolate_->logger()->DebugTag("Put command on command_queue.");
- command_queue_.Put(message);
- command_received_.Signal();
-
- // Set the debug command break flag to have the command processed.
- if (!in_debug_scope()) isolate_->stack_guard()->RequestDebugCommand();
-}
-
MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
DebugScope debug_scope(this);
if (debug_scope.failed()) return isolate_->factory()->undefined_value();
@@ -2237,6 +2091,8 @@ MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
void Debug::HandleDebugBreak() {
+ // Initialize LiveEdit.
+ LiveEdit::InitializeThreadLocal(this);
// Ignore debug break during bootstrapping.
if (isolate_->bootstrapper()->IsActive()) return;
// Just continue if breaks are disabled.
@@ -2251,8 +2107,21 @@ void Debug::HandleDebugBreak() {
DCHECK(!it.done());
Object* fun = it.frame()->function();
if (fun && fun->IsJSFunction()) {
- // Don't stop in builtin functions.
- if (!JSFunction::cast(fun)->shared()->IsSubjectToDebugging()) return;
+ HandleScope scope(isolate_);
+ // Don't stop in builtin and blackboxed functions.
+ Handle<SharedFunctionInfo> shared(JSFunction::cast(fun)->shared(),
+ isolate_);
+ if (!shared->IsSubjectToDebugging() || IsBlackboxed(shared)) {
+ // The inspector uses pause-on-next-statement for asynchronous
+ // breakpoints. When the breakpoint fires we try to break on the first
+ // non-blackboxed statement. To achieve this we deoptimize the current
+ // function and do not clear the requested DebugBreak even if this
+ // function is blackboxed, so that we can still break on a call to a
+ // non-blackboxed function.
+ // TODO(yangguo): introduce break_on_function_entry since current
+ // implementation is slow.
+ Deoptimizer::DeoptimizeFunction(JSFunction::cast(fun));
+ return;
+ }
JSGlobalObject* global =
JSFunction::cast(fun)->context()->global_object();
// Don't stop in debugger functions.
@@ -2262,41 +2131,26 @@ void Debug::HandleDebugBreak() {
}
}
- // Collect the break state before clearing the flags.
- bool debug_command_only = isolate_->stack_guard()->CheckDebugCommand() &&
- !isolate_->stack_guard()->CheckDebugBreak();
-
isolate_->stack_guard()->ClearDebugBreak();
// Clear stepping to avoid duplicate breaks.
ClearStepping();
- ProcessDebugMessages(debug_command_only);
-}
-
-void Debug::ProcessDebugMessages(bool debug_command_only) {
- isolate_->stack_guard()->ClearDebugCommand();
-
- StackLimitCheck check(isolate_);
- if (check.HasOverflowed()) return;
-
HandleScope scope(isolate_);
DebugScope debug_scope(this);
if (debug_scope.failed()) return;
- // Notify the debug event listeners. Indicate auto continue if the break was
- // a debug command break.
- OnDebugBreak(isolate_->factory()->undefined_value(), debug_command_only);
+ OnDebugBreak(isolate_->factory()->undefined_value());
}
#ifdef DEBUG
void Debug::PrintBreakLocation() {
if (!FLAG_print_break_location) return;
HandleScope scope(isolate_);
- JavaScriptFrameIterator iterator(isolate_);
+ StackTraceFrameIterator iterator(isolate_);
if (iterator.done()) return;
- JavaScriptFrame* frame = iterator.frame();
- FrameSummary summary = FrameSummary::GetFirst(frame);
+ StandardFrame* frame = iterator.frame();
+ FrameSummary summary = FrameSummary::GetTop(frame);
int source_position = summary.SourcePosition();
Handle<Object> script_obj = summary.script();
PrintF("[debug] break in function '");
@@ -2342,14 +2196,11 @@ DebugScope::DebugScope(Debug* debug)
// Store the previous break id, frame id and return value.
break_id_ = debug_->break_id();
break_frame_id_ = debug_->break_frame_id();
- return_value_ = debug_->return_value();
  // Create the new break info. If there are no proper frames there is no
  // break frame id.
StackTraceFrameIterator it(isolate());
bool has_frames = !it.done();
- // We don't currently support breaking inside wasm framess.
- DCHECK(!has_frames || !it.is_wasm());
debug_->thread_local_.break_frame_id_ =
has_frames ? it.frame()->id() : StackFrame::NO_ID;
debug_->SetNextBreakId();
@@ -2363,18 +2214,6 @@ DebugScope::DebugScope(Debug* debug)
DebugScope::~DebugScope() {
- if (!failed_ && prev_ == NULL) {
- // Clear mirror cache when leaving the debugger. Skip this if there is a
- // pending exception as clearing the mirror cache calls back into
- // JavaScript. This can happen if the v8::Debug::Call is used in which
- // case the exception should end up in the calling code.
- if (!isolate()->has_pending_exception()) debug_->ClearMirrorCache();
-
- // If there are commands in the queue when leaving the debugger request
- // that these commands are processed.
- if (debug_->has_commands()) isolate()->stack_guard()->RequestDebugCommand();
- }
-
// Leaving this debugger entry.
base::NoBarrier_Store(&debug_->thread_local_.current_debug_scope_,
reinterpret_cast<base::AtomicWord>(prev_));
@@ -2382,11 +2221,18 @@ DebugScope::~DebugScope() {
// Restore to the previous break state.
debug_->thread_local_.break_frame_id_ = break_frame_id_;
debug_->thread_local_.break_id_ = break_id_;
- debug_->thread_local_.return_value_ = return_value_;
debug_->UpdateState();
}
+ReturnValueScope::ReturnValueScope(Debug* debug) : debug_(debug) {
+ return_value_ = debug_->return_value_handle();
+}
+
+ReturnValueScope::~ReturnValueScope() {
+ debug_->set_return_value(*return_value_);
+}
+
bool Debug::PerformSideEffectCheck(Handle<JSFunction> function) {
DCHECK(isolate_->needs_side_effect_check());
DisallowJavascriptExecution no_js(isolate_);
@@ -2415,248 +2261,157 @@ bool Debug::PerformSideEffectCheckForCallback(Address function) {
return false;
}
-NoSideEffectScope::~NoSideEffectScope() {
- if (isolate_->needs_side_effect_check() &&
- isolate_->debug()->side_effect_check_failed_) {
- DCHECK(isolate_->has_pending_exception());
- DCHECK_EQ(isolate_->heap()->termination_exception(),
- isolate_->pending_exception());
- // Convert the termination exception into a regular exception.
- isolate_->CancelTerminateExecution();
- isolate_->Throw(*isolate_->factory()->NewEvalError(
- MessageTemplate::kNoSideEffectDebugEvaluate));
+void LegacyDebugDelegate::PromiseEventOccurred(
+ v8::debug::PromiseDebugActionType type, int id, int parent_id) {
+ Handle<Object> event_data;
+ if (isolate_->debug()->MakeAsyncTaskEvent(type, id).ToHandle(&event_data)) {
+ ProcessDebugEvent(v8::AsyncTaskEvent, Handle<JSObject>::cast(event_data));
}
- isolate_->set_needs_side_effect_check(old_needs_side_effect_check_);
- isolate_->debug()->UpdateHookOnFunctionCall();
- isolate_->debug()->side_effect_check_failed_ = false;
}
-MessageImpl MessageImpl::NewEvent(DebugEvent event, bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data) {
- MessageImpl message(true, event, running, exec_state, event_data,
- Handle<String>(), NULL);
- return message;
-}
-
-MessageImpl MessageImpl::NewResponse(DebugEvent event, bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data) {
- MessageImpl message(false, event, running, exec_state, event_data,
- response_json, client_data);
- return message;
-}
-
-MessageImpl::MessageImpl(bool is_event, DebugEvent event, bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data)
- : is_event_(is_event),
- event_(event),
- running_(running),
- exec_state_(exec_state),
- event_data_(event_data),
- response_json_(response_json),
- client_data_(client_data) {}
-
-bool MessageImpl::IsEvent() const { return is_event_; }
-
-bool MessageImpl::IsResponse() const { return !is_event_; }
-
-DebugEvent MessageImpl::GetEvent() const { return event_; }
-
-bool MessageImpl::WillStartRunning() const { return running_; }
-
-v8::Local<v8::Object> MessageImpl::GetExecutionState() const {
- return v8::Utils::ToLocal(exec_state_);
+void LegacyDebugDelegate::ScriptCompiled(v8::Local<v8::debug::Script> script,
+ bool is_compile_error) {
+ Handle<Object> event_data;
+ v8::DebugEvent event = is_compile_error ? v8::CompileError : v8::AfterCompile;
+ if (isolate_->debug()
+ ->MakeCompileEvent(v8::Utils::OpenHandle(*script), event)
+ .ToHandle(&event_data)) {
+ ProcessDebugEvent(event, Handle<JSObject>::cast(event_data));
+ }
}
-v8::Isolate* MessageImpl::GetIsolate() const {
- return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
+void LegacyDebugDelegate::BreakProgramRequested(
+ v8::Local<v8::Context> paused_context, v8::Local<v8::Object> exec_state,
+ v8::Local<v8::Value> break_points_hit) {
+ Handle<Object> event_data;
+ if (isolate_->debug()
+ ->MakeBreakEvent(v8::Utils::OpenHandle(*break_points_hit))
+ .ToHandle(&event_data)) {
+ ProcessDebugEvent(
+ v8::Break, Handle<JSObject>::cast(event_data),
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*exec_state)));
+ }
}
-v8::Local<v8::Object> MessageImpl::GetEventData() const {
- return v8::Utils::ToLocal(event_data_);
+void LegacyDebugDelegate::ExceptionThrown(v8::Local<v8::Context> paused_context,
+ v8::Local<v8::Object> exec_state,
+ v8::Local<v8::Value> exception,
+ v8::Local<v8::Value> promise,
+ bool is_uncaught) {
+ Handle<Object> event_data;
+ if (isolate_->debug()
+ ->MakeExceptionEvent(v8::Utils::OpenHandle(*exception), is_uncaught,
+ v8::Utils::OpenHandle(*promise))
+ .ToHandle(&event_data)) {
+ ProcessDebugEvent(
+ v8::Exception, Handle<JSObject>::cast(event_data),
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*exec_state)));
+ }
}
-v8::Local<v8::String> MessageImpl::GetJSON() const {
- Isolate* isolate = event_data_->GetIsolate();
- v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate));
+void LegacyDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
+ Handle<JSObject> event_data) {
+ Handle<Object> exec_state;
+ if (isolate_->debug()->MakeExecutionState().ToHandle(&exec_state)) {
+ ProcessDebugEvent(event, event_data, Handle<JSObject>::cast(exec_state));
+ }
+}
- if (IsEvent()) {
- // Call toJSONProtocol on the debug event object.
- Handle<Object> fun =
- JSReceiver::GetProperty(isolate, event_data_, "toJSONProtocol")
- .ToHandleChecked();
- if (!fun->IsJSFunction()) {
- return v8::Local<v8::String>();
- }
+JavaScriptDebugDelegate::JavaScriptDebugDelegate(Isolate* isolate,
+ Handle<JSFunction> listener,
+ Handle<Object> data)
+ : LegacyDebugDelegate(isolate) {
+ GlobalHandles* global_handles = isolate->global_handles();
+ listener_ = Handle<JSFunction>::cast(global_handles->Create(*listener));
+ data_ = global_handles->Create(*data);
+}
- MaybeHandle<Object> maybe_exception;
- MaybeHandle<Object> maybe_json = Execution::TryCall(
- isolate, fun, event_data_, 0, nullptr,
- Execution::MessageHandling::kReport, &maybe_exception);
- Handle<Object> json;
- if (!maybe_json.ToHandle(&json) || !json->IsString()) {
- return v8::Local<v8::String>();
- }
- return scope.Escape(v8::Utils::ToLocal(Handle<String>::cast(json)));
- } else {
- return v8::Utils::ToLocal(response_json_);
- }
+JavaScriptDebugDelegate::~JavaScriptDebugDelegate() {
+ GlobalHandles::Destroy(Handle<Object>::cast(listener_).location());
+ GlobalHandles::Destroy(data_.location());
}
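JavaScriptDebugDelegate pins the listener function and its data with global handles so they survive garbage collection for the delegate's lifetime, then destroys those handles in the destructor to avoid leaking them. A sketch of the same ownership pattern using the public embedder API (the class name is hypothetical):

    // Sketch only: v8::Global stands in for the internal global handle.
    #include <v8.h>

    class ListenerHolder {
     public:
      ListenerHolder(v8::Isolate* isolate, v8::Local<v8::Function> listener)
          : listener_(isolate, listener) {}  // promote to a global handle
      ~ListenerHolder() { listener_.Reset(); }  // release the handle

     private:
      v8::Global<v8::Function> listener_;
    };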
-namespace {
-v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
- Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
- // Isolate::context() may have been NULL when "script collected" event
- // occurred.
- if (context.is_null()) return v8::Local<v8::Context>();
- Handle<Context> native_context(context->native_context());
- return v8::Utils::ToLocal(native_context);
+void JavaScriptDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
+ Handle<JSObject> event_data,
+ Handle<JSObject> exec_state) {
+ Handle<Object> argv[] = {Handle<Object>(Smi::FromInt(event), isolate_),
+ exec_state, event_data, data_};
+ Handle<JSReceiver> global = isolate_->global_proxy();
+ // Listener must not throw.
+ Execution::Call(isolate_, listener_, global, arraysize(argv), argv)
+ .ToHandleChecked();
}
-} // anonymous namespace
-v8::Local<v8::Context> MessageImpl::GetEventContext() const {
- Isolate* isolate = event_data_->GetIsolate();
- v8::Local<v8::Context> context = GetDebugEventContext(isolate);
- // Isolate::context() may be NULL when "script collected" event occurs.
- DCHECK(!context.IsEmpty());
- return context;
+NativeDebugDelegate::NativeDebugDelegate(Isolate* isolate,
+ v8::Debug::EventCallback callback,
+ Handle<Object> data)
+ : LegacyDebugDelegate(isolate), callback_(callback) {
+ data_ = isolate->global_handles()->Create(*data);
}
-v8::Debug::ClientData* MessageImpl::GetClientData() const {
- return client_data_;
+NativeDebugDelegate::~NativeDebugDelegate() {
+ GlobalHandles::Destroy(data_.location());
}
-EventDetailsImpl::EventDetailsImpl(DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<Object> callback_data,
- v8::Debug::ClientData* client_data)
+NativeDebugDelegate::EventDetails::EventDetails(DebugEvent event,
+ Handle<JSObject> exec_state,
+ Handle<JSObject> event_data,
+ Handle<Object> callback_data)
: event_(event),
exec_state_(exec_state),
event_data_(event_data),
- callback_data_(callback_data),
- client_data_(client_data) {}
-
+ callback_data_(callback_data) {}
-DebugEvent EventDetailsImpl::GetEvent() const {
+DebugEvent NativeDebugDelegate::EventDetails::GetEvent() const {
return event_;
}
-
-v8::Local<v8::Object> EventDetailsImpl::GetExecutionState() const {
+v8::Local<v8::Object> NativeDebugDelegate::EventDetails::GetExecutionState()
+ const {
return v8::Utils::ToLocal(exec_state_);
}
-
-v8::Local<v8::Object> EventDetailsImpl::GetEventData() const {
+v8::Local<v8::Object> NativeDebugDelegate::EventDetails::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
-
-v8::Local<v8::Context> EventDetailsImpl::GetEventContext() const {
+v8::Local<v8::Context> NativeDebugDelegate::EventDetails::GetEventContext()
+ const {
return GetDebugEventContext(exec_state_->GetIsolate());
}
-
-v8::Local<v8::Value> EventDetailsImpl::GetCallbackData() const {
+v8::Local<v8::Value> NativeDebugDelegate::EventDetails::GetCallbackData()
+ const {
return v8::Utils::ToLocal(callback_data_);
}
-
-v8::Debug::ClientData* EventDetailsImpl::GetClientData() const {
- return client_data_;
-}
-
-v8::Isolate* EventDetailsImpl::GetIsolate() const {
+v8::Isolate* NativeDebugDelegate::EventDetails::GetIsolate() const {
return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
}
-CommandMessage::CommandMessage()
- : text_(Vector<uint16_t>::empty()), client_data_(NULL) {}
-
-CommandMessage::CommandMessage(const Vector<uint16_t>& text,
- v8::Debug::ClientData* data)
- : text_(text), client_data_(data) {}
-
-void CommandMessage::Dispose() {
- text_.Dispose();
- delete client_data_;
- client_data_ = NULL;
-}
-
-CommandMessage CommandMessage::New(const Vector<uint16_t>& command,
- v8::Debug::ClientData* data) {
- return CommandMessage(command.Clone(), data);
-}
-
-CommandMessageQueue::CommandMessageQueue(int size)
- : start_(0), end_(0), size_(size) {
- messages_ = NewArray<CommandMessage>(size);
-}
-
-CommandMessageQueue::~CommandMessageQueue() {
- while (!IsEmpty()) Get().Dispose();
- DeleteArray(messages_);
-}
-
-CommandMessage CommandMessageQueue::Get() {
- DCHECK(!IsEmpty());
- int result = start_;
- start_ = (start_ + 1) % size_;
- return messages_[result];
+void NativeDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
+ Handle<JSObject> event_data,
+ Handle<JSObject> exec_state) {
+ EventDetails event_details(event, exec_state, event_data, data_);
+ Isolate* isolate = isolate_;
+ callback_(event_details);
+ CHECK(!isolate->has_scheduled_exception());
}
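NativeDebugDelegate adapts the new delegate interface back onto the legacy v8::Debug::EventCallback. A sketch of such a callback as an embedder would have written it; note that the CHECK above means the callback must not leave an exception scheduled:

    // Sketch only: a legacy-style native event callback.
    void OnDebugEvent(const v8::Debug::EventDetails& details) {
      if (details.GetEvent() == v8::Break) {
        v8::Local<v8::Object> exec_state = details.GetExecutionState();
        (void)exec_state;  // inspect frames, evaluate expressions, etc.
      }
    }

Registration went through the legacy entry point (v8::Debug::SetDebugEventListener at this revision); treat that exact signature as an assumption of this sketch rather than something shown in the patch.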
-void CommandMessageQueue::Put(const CommandMessage& message) {
- if ((end_ + 1) % size_ == start_) {
- Expand();
- }
- messages_[end_] = message;
- end_ = (end_ + 1) % size_;
-}
-
-void CommandMessageQueue::Expand() {
- CommandMessageQueue new_queue(size_ * 2);
- while (!IsEmpty()) {
- new_queue.Put(Get());
+NoSideEffectScope::~NoSideEffectScope() {
+ if (isolate_->needs_side_effect_check() &&
+ isolate_->debug()->side_effect_check_failed_) {
+ DCHECK(isolate_->has_pending_exception());
+ DCHECK_EQ(isolate_->heap()->termination_exception(),
+ isolate_->pending_exception());
+ // Convert the termination exception into a regular exception.
+ isolate_->CancelTerminateExecution();
+ isolate_->Throw(*isolate_->factory()->NewEvalError(
+ MessageTemplate::kNoSideEffectDebugEvaluate));
}
- CommandMessage* array_to_free = messages_;
- *this = new_queue;
- new_queue.messages_ = array_to_free;
- // Make the new_queue empty so that it doesn't call Dispose on any messages.
- new_queue.start_ = new_queue.end_;
- // Automatic destructor called on new_queue, freeing array_to_free.
-}
-
-LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
- : logger_(logger), queue_(size) {}
-
-bool LockingCommandMessageQueue::IsEmpty() const {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
- return queue_.IsEmpty();
-}
-
-CommandMessage LockingCommandMessageQueue::Get() {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
- CommandMessage result = queue_.Get();
- logger_->DebugEvent("Get", result.text());
- return result;
-}
-
-void LockingCommandMessageQueue::Put(const CommandMessage& message) {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
- queue_.Put(message);
- logger_->DebugEvent("Put", message.text());
-}
-
-void LockingCommandMessageQueue::Clear() {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
- queue_.Clear();
+ isolate_->set_needs_side_effect_check(old_needs_side_effect_check_);
+ isolate_->debug()->UpdateHookOnFunctionCall();
+ isolate_->debug()->side_effect_check_failed_ = false;
}
} // namespace internal
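The relocated NoSideEffectScope destructor is where a failed side-effect check becomes visible to callers: the termination exception that aborted the evaluated code is cancelled and replaced with an ordinary, catchable EvalError. The intended usage shape is roughly the following sketch; the constructor arguments are an assumption, not taken from the patch:

    // Sketch only.
    {
      NoSideEffectScope no_side_effects(isolate,
                                        /*disallow_side_effects=*/true);
      // Run the debug-evaluate request here. Any attempted side effect
      // terminates execution; the destructor above then cancels the
      // termination and throws an EvalError instead.
    }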
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index b3bb3c46a6..43338d7f0a 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -6,7 +6,6 @@
#define V8_DEBUG_DEBUG_H_
#include "src/allocation.h"
-#include "src/arguments.h"
#include "src/assembler.h"
#include "src/base/atomicops.h"
#include "src/base/hashmap.h"
@@ -40,9 +39,7 @@ enum StepAction : int8_t {
StepNext = 1, // Step to the next statement in the current function.
StepIn = 2, // Step into new functions invoked or the next statement
// in the current function.
- StepFrame = 3, // Step into a new frame or return to previous frame.
-
- LastStepAction = StepFrame
+ LastStepAction = StepIn
};
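With StepFrame removed, LastStepAction now aliases StepIn, so range checks over the enum automatically reject the retired value. A sketch of such a check against the enum above (helper name hypothetical; StepOut is 0, per the mirror table in debug.js below):

    // Sketch only: validating a step action after the StepFrame removal.
    bool IsValidStepAction(int raw) {
      return raw >= StepOut && raw <= LastStepAction;  // StepOut..StepIn
    }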
// Type of exception break. NOTE: These values are in macros.py as well.
@@ -52,10 +49,6 @@ enum ExceptionBreakType {
};
-// Type of exception break.
-enum BreakLocatorType { ALL_BREAK_LOCATIONS, CALLS_AND_RETURNS };
-
-
// The different types of breakpoint position alignments.
// Must match Debug.BreakPositionAlignment in debug.js
enum BreakPositionAlignment {
@@ -72,12 +65,10 @@ enum DebugBreakType {
DEBUG_BREAK_SLOT_AT_TAIL_CALL,
};
-const int kDebugPromiseNoID = 0;
-const int kDebugPromiseFirstID = 1;
-
class BreakLocation {
public:
- static BreakLocation FromFrame(StandardFrame* frame);
+ static BreakLocation FromFrame(Handle<DebugInfo> debug_info,
+ JavaScriptFrame* frame);
static void AllAtCurrentStatement(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame,
@@ -126,8 +117,7 @@ class BreakLocation {
class BreakIterator {
public:
static std::unique_ptr<BreakIterator> GetIterator(
- Handle<DebugInfo> debug_info, Handle<AbstractCode> abstract_code,
- BreakLocatorType type = ALL_BREAK_LOCATIONS);
+ Handle<DebugInfo> debug_info, Handle<AbstractCode> abstract_code);
virtual ~BreakIterator() {}
@@ -149,8 +139,7 @@ class BreakIterator {
virtual void SetDebugBreak() = 0;
protected:
- explicit BreakIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType break_locator_type);
+ explicit BreakIterator(Handle<DebugInfo> debug_info);
int BreakIndexFromPosition(int position, BreakPositionAlignment alignment);
@@ -160,7 +149,6 @@ class BreakIterator {
int break_index_;
int position_;
int statement_position_;
- BreakLocatorType break_locator_type_;
private:
DisallowHeapAllocation no_gc_;
@@ -169,7 +157,7 @@ class BreakIterator {
class CodeBreakIterator : public BreakIterator {
public:
- CodeBreakIterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
+ explicit CodeBreakIterator(Handle<DebugInfo> debug_info);
~CodeBreakIterator() override {}
BreakLocation GetBreakLocation() override;
@@ -188,7 +176,7 @@ class CodeBreakIterator : public BreakIterator {
}
private:
- int GetModeMask(BreakLocatorType type);
+ int GetModeMask();
DebugBreakType GetDebugBreakType();
RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
@@ -201,8 +189,7 @@ class CodeBreakIterator : public BreakIterator {
class BytecodeArrayBreakIterator : public BreakIterator {
public:
- BytecodeArrayBreakIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type);
+ explicit BytecodeArrayBreakIterator(Handle<DebugInfo> debug_info);
~BytecodeArrayBreakIterator() override {}
BreakLocation GetBreakLocation() override;
@@ -243,135 +230,6 @@ class DebugInfoListNode {
DebugInfoListNode* next_;
};
-// Message delivered to the message handler callback. This is either a debugger
-// event or the response to a command.
-class MessageImpl : public v8::Debug::Message {
- public:
- // Create a message object for a debug event.
- static MessageImpl NewEvent(DebugEvent event, bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data);
-
- // Create a message object for the response to a debug command.
- static MessageImpl NewResponse(DebugEvent event, bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data);
-
- // Implementation of interface v8::Debug::Message.
- virtual bool IsEvent() const;
- virtual bool IsResponse() const;
- virtual DebugEvent GetEvent() const;
- virtual bool WillStartRunning() const;
- virtual v8::Local<v8::Object> GetExecutionState() const;
- virtual v8::Local<v8::Object> GetEventData() const;
- virtual v8::Local<v8::String> GetJSON() const;
- virtual v8::Local<v8::Context> GetEventContext() const;
- virtual v8::Debug::ClientData* GetClientData() const;
- virtual v8::Isolate* GetIsolate() const;
-
- private:
- MessageImpl(bool is_event, DebugEvent event, bool running,
- Handle<JSObject> exec_state, Handle<JSObject> event_data,
- Handle<String> response_json, v8::Debug::ClientData* client_data);
-
- bool is_event_; // Does this message represent a debug event?
- DebugEvent event_; // Debug event causing the break.
- bool running_; // Will the VM start running after this event?
- Handle<JSObject> exec_state_; // Current execution state.
- Handle<JSObject> event_data_; // Data associated with the event.
- Handle<String> response_json_; // Response JSON if message holds a response.
- v8::Debug::ClientData* client_data_; // Client data passed with the request.
-};
-
-// Details of the debug event delivered to the debug event listener.
-class EventDetailsImpl : public debug::EventDetails {
- public:
- EventDetailsImpl(DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<Object> callback_data,
- v8::Debug::ClientData* client_data);
- virtual DebugEvent GetEvent() const;
- virtual v8::Local<v8::Object> GetExecutionState() const;
- virtual v8::Local<v8::Object> GetEventData() const;
- virtual v8::Local<v8::Context> GetEventContext() const;
- virtual v8::Local<v8::Value> GetCallbackData() const;
- virtual v8::Debug::ClientData* GetClientData() const;
- virtual v8::Isolate* GetIsolate() const;
-
- private:
- DebugEvent event_; // Debug event causing the break.
- Handle<JSObject> exec_state_; // Current execution state.
- Handle<JSObject> event_data_; // Data associated with the event.
- Handle<Object> callback_data_; // User data passed with the callback
- // when it was registered.
- v8::Debug::ClientData* client_data_; // Data passed to DebugBreakForCommand.
-};
-
-// Message sent by the user to the v8 debugger, or a debugger output message.
-// In addition to the command text it may contain a pointer to some user data
-// which is expected to be passed along with the command response to the
-// message handler.
-class CommandMessage {
- public:
- static CommandMessage New(const Vector<uint16_t>& command,
- v8::Debug::ClientData* data);
- CommandMessage();
-
- // Deletes user data and disposes of the text.
- void Dispose();
- Vector<uint16_t> text() const { return text_; }
- v8::Debug::ClientData* client_data() const { return client_data_; }
-
- private:
- CommandMessage(const Vector<uint16_t>& text, v8::Debug::ClientData* data);
-
- Vector<uint16_t> text_;
- v8::Debug::ClientData* client_data_;
-};
-
-// A Queue of CommandMessage objects. A thread-safe version is
-// LockingCommandMessageQueue, based on this class.
-class CommandMessageQueue BASE_EMBEDDED {
- public:
- explicit CommandMessageQueue(int size);
- ~CommandMessageQueue();
- bool IsEmpty() const { return start_ == end_; }
- CommandMessage Get();
- void Put(const CommandMessage& message);
- void Clear() { start_ = end_ = 0; } // Queue is empty after Clear().
-
- private:
- // Doubles the size of the message queue, and copies the messages.
- void Expand();
-
- CommandMessage* messages_;
- int start_;
- int end_;
- int size_; // The size of the queue buffer. Queue can hold size-1 messages.
-};
-
-// LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
-// messages. The message data is not managed by LockingCommandMessageQueue.
-// Pointers to the data are passed in and out. Implemented by adding a
-// Mutex to CommandMessageQueue. Includes logging of all puts and gets.
-class LockingCommandMessageQueue BASE_EMBEDDED {
- public:
- LockingCommandMessageQueue(Logger* logger, int size);
- bool IsEmpty() const;
- CommandMessage Get();
- void Put(const CommandMessage& message);
- void Clear();
-
- private:
- Logger* logger_;
- CommandMessageQueue queue_;
- mutable base::Mutex mutex_;
- DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
-};
-
class DebugFeatureTracker {
public:
enum Feature {
@@ -404,29 +262,23 @@ class DebugFeatureTracker {
class Debug {
public:
// Debug event triggers.
- void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
+ void OnDebugBreak(Handle<Object> break_points_hit);
void OnThrow(Handle<Object> exception);
void OnPromiseReject(Handle<Object> promise, Handle<Object> value);
void OnCompileError(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
- void OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id);
+ void OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id,
+ int parent_id);
- // API facing.
- void SetEventListener(Handle<Object> callback, Handle<Object> data);
- void SetMessageHandler(v8::Debug::MessageHandler handler);
- void EnqueueCommandMessage(Vector<const uint16_t> command,
- v8::Debug::ClientData* client_data = NULL);
MUST_USE_RESULT MaybeHandle<Object> Call(Handle<Object> fun,
Handle<Object> data);
Handle<Context> GetDebugContext();
void HandleDebugBreak();
- void ProcessDebugMessages(bool debug_command_only);
// Internal logic
bool Load();
void Break(JavaScriptFrame* frame);
- void SetAfterBreakTarget(JavaScriptFrame* frame);
// Scripts handling.
Handle<FixedArray> GetLoadedScripts();
@@ -464,15 +316,17 @@ class Debug {
void RecordGenerator(Handle<JSGeneratorObject> generator_object);
+ void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+ Handle<Object> parent);
+
int NextAsyncTaskId(Handle<JSObject> promise);
- void SetAsyncTaskListener(debug::AsyncTaskListener listener, void* data);
+ bool IsBlackboxed(Handle<SharedFunctionInfo> shared);
+
+ void SetDebugDelegate(debug::DebugDelegate* delegate, bool pass_ownership);
- // Returns whether the operation succeeded. Compilation can only be triggered
- // if a valid closure is passed as the second argument, otherwise the shared
- // function needs to be compiled already.
- bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> function);
+ // Returns whether the operation succeeded.
+ bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
void CreateDebugInfo(Handle<SharedFunctionInfo> shared);
static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
@@ -494,8 +348,9 @@ class Debug {
bool IsBreakAtReturn(JavaScriptFrame* frame);
// Support for LiveEdit
- void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- LiveEditFrameDropMode mode);
+ void ScheduleFrameRestart(StackFrame* frame);
+
+ bool IsFrameBlackboxed(JavaScriptFrame* frame);
// Threading support.
char* ArchiveDebug(char* to);
@@ -538,10 +393,11 @@ class Debug {
StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
int break_id() { return thread_local_.break_id_; }
- Handle<Object> return_value() { return thread_local_.return_value_; }
- void set_return_value(Handle<Object> value) {
- thread_local_.return_value_ = value;
+ Handle<Object> return_value_handle() {
+ return handle(thread_local_.return_value_, isolate_);
}
+ Object* return_value() { return thread_local_.return_value_; }
+ void set_return_value(Object* value) { thread_local_.return_value_ = value; }
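The break return value is now stored as a raw Object* in the thread-local block, with return_value_handle() as the escape hatch for C++ code that may allocate; the raw accessor is only safe while no GC can move the object. A sketch of the resulting contract (wrapper function hypothetical):

    // Sketch only: raw slot vs. GC-safe handle.
    void UseReturnValue(Debug* debug) {
      Object* raw = debug->return_value();  // must not be held across a GC
      Handle<Object> safe = debug->return_value_handle();  // GC-safe copy
      (void)raw;
      (void)safe;
    }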
// Support for embedding into generated code.
Address is_active_address() {
@@ -552,10 +408,6 @@ class Debug {
return reinterpret_cast<Address>(&hook_on_function_call_);
}
- Address after_break_target_address() {
- return reinterpret_cast<Address>(&after_break_target_);
- }
-
Address last_step_action_address() {
return reinterpret_cast<Address>(&thread_local_.last_step_action_);
}
@@ -564,28 +416,33 @@ class Debug {
return reinterpret_cast<Address>(&thread_local_.suspended_generator_);
}
+ Address restart_fp_address() {
+ return reinterpret_cast<Address>(&thread_local_.restart_fp_);
+ }
+
StepAction last_step_action() { return thread_local_.last_step_action_; }
DebugFeatureTracker* feature_tracker() { return &feature_tracker_; }
private:
explicit Debug(Isolate* isolate);
+ ~Debug() { DCHECK_NULL(debug_delegate_); }
void UpdateState();
void UpdateHookOnFunctionCall();
+ void RemoveDebugDelegate();
void Unload();
void SetNextBreakId() {
thread_local_.break_id_ = ++thread_local_.break_count_;
}
- // Check whether there are commands in the command queue.
- inline bool has_commands() const { return !command_queue_.IsEmpty(); }
+ // Return the number of virtual frames below debugger entry.
+ int CurrentFrameCount();
+
inline bool ignore_events() const {
return is_suppressed_ || !is_active_ || isolate_->needs_side_effect_check();
}
- inline bool break_disabled() const {
- return break_disabled_ || in_debug_event_listener_;
- }
+ inline bool break_disabled() const { return break_disabled_; }
void clear_suspended_generator() {
thread_local_.suspended_generator_ = Smi::kZero;
@@ -595,6 +452,8 @@ class Debug {
return thread_local_.suspended_generator_ != Smi::kZero;
}
+ bool IsExceptionBlackboxed(bool uncaught);
+
void OnException(Handle<Object> exception, Handle<Object> promise);
// Constructors for debug event objects.
@@ -607,22 +466,11 @@ class Debug {
Handle<Object> promise);
MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
Handle<Script> script, v8::DebugEvent type);
- MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(Handle<Smi> type,
- Handle<Smi> id);
-
- // Mirror cache handling.
- void ClearMirrorCache();
+ MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
+ v8::debug::PromiseDebugActionType type, int id);
- void CallEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data);
void ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script);
- void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
- bool auto_continue);
- void NotifyMessageHandler(v8::DebugEvent event, Handle<JSObject> exec_state,
- Handle<JSObject> event_data, bool auto_continue);
- void InvokeMessageHandler(MessageImpl message);
+ void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data);
// Find the closest source position for a break point for a given position.
int FindBreakablePosition(Handle<DebugInfo> debug_info, int source_position,
@@ -634,8 +482,7 @@ class Debug {
// Clear all code from instrumentation.
void ClearAllBreakPoints();
// Instrument a function with one-shots.
- void FloodWithOneShot(Handle<JSFunction> function,
- BreakLocatorType type = ALL_BREAK_LOCATIONS);
+ void FloodWithOneShot(Handle<SharedFunctionInfo> function);
// Clear all one-shot instrumentations, but restore break points.
void ClearOneShot();
@@ -660,17 +507,9 @@ class Debug {
// Global handles.
Handle<Context> debug_context_;
- Handle<Object> event_listener_;
- Handle<Object> event_listener_data_;
- v8::Debug::MessageHandler message_handler_;
-
- debug::AsyncTaskListener async_task_listener_ = nullptr;
- void* async_task_listener_data_ = nullptr;
-
- static const int kQueueInitialSize = 4;
- base::Semaphore command_received_; // Signaled for each command received.
- LockingCommandMessageQueue command_queue_;
+ debug::DebugDelegate* debug_delegate_ = nullptr;
+ bool owns_debug_delegate_ = false;
// Debugger is active, i.e. there is a debug event listener attached.
bool is_active_;
@@ -685,8 +524,6 @@ class Debug {
bool break_disabled_;
// Do not break on break points.
bool break_points_active_;
- // Nested inside a debug event listener.
- bool in_debug_event_listener_;
// Trigger debug break events for all exceptions.
bool break_on_exception_;
// Trigger debug break events for uncaught exceptions.
@@ -697,11 +534,6 @@ class Debug {
// List of active debug info objects.
DebugInfoListNode* debug_info_list_;
- // Storage location for jump when exiting debug break calls.
- // Note that this address is not GC safe. It should be computed immediately
- // before returning to the DebugBreakCallHelper.
- Address after_break_target_;
-
// Used to collect histogram data on debugger feature usage.
DebugFeatureTracker feature_tracker_;
@@ -727,21 +559,20 @@ class Debug {
int last_statement_position_;
// Frame pointer from last step next or step frame action.
- Address last_fp_;
+ int last_frame_count_;
// Frame pointer of the target frame we want to arrive at.
- Address target_fp_;
+ int target_frame_count_;
- // Stores the way how LiveEdit has patched the stack. It is used when
- // debugger returns control back to user script.
- LiveEditFrameDropMode frame_drop_mode_;
-
- // Value of accumulator in interpreter frames. In non-interpreter frames
- // this value will be the hole.
- Handle<Object> return_value_;
+ // Value of the accumulator at the point of entering the debugger.
+ Object* return_value_;
+ // The suspended generator object to track when stepping.
Object* suspended_generator_;
+ // The new frame pointer to drop to when restarting a frame.
+ Address restart_fp_;
+
int async_task_count_;
};
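Stepping state now records logical frame counts (last_frame_count_, target_frame_count_) instead of raw frame pointers, presumably so comparisons stay meaningful across inlined and restarted frames; restart_fp_ is the one raw frame pointer left, used to drop to a frame being restarted. A heavily hedged sketch of the kind of comparison this enables (the direction of the inequality is an assumption, not taken from the patch):

    // Sketch only: step targets compared by frame count, not frame pointer.
    bool ArrivedAtStepOutTarget(int current_frame_count,
                                int target_frame_count) {
      return current_frame_count < target_frame_count;
    }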
@@ -756,6 +587,7 @@ class Debug {
friend class LiveEdit;
friend class SuppressDebug;
friend class NoSideEffectScope;
+ friend class LegacyDebugDelegate;
friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
friend void CheckDebuggerUnloaded(bool check_functions); // In test-debug.cc
@@ -763,6 +595,84 @@ class Debug {
DISALLOW_COPY_AND_ASSIGN(Debug);
};
+class LegacyDebugDelegate : public v8::debug::DebugDelegate {
+ public:
+ explicit LegacyDebugDelegate(Isolate* isolate) : isolate_(isolate) {}
+ void PromiseEventOccurred(v8::debug::PromiseDebugActionType type, int id,
+ int parent_id) override;
+ void ScriptCompiled(v8::Local<v8::debug::Script> script,
+ bool has_compile_error) override;
+ void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+ v8::Local<v8::Object> exec_state,
+ v8::Local<v8::Value> break_points_hit) override;
+ void ExceptionThrown(v8::Local<v8::Context> paused_context,
+ v8::Local<v8::Object> exec_state,
+ v8::Local<v8::Value> exception,
+ v8::Local<v8::Value> promise, bool is_uncaught) override;
+ bool IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
+ const v8::debug::Location& start,
+ const v8::debug::Location& end) override {
+ return false;
+ }
+
+ protected:
+ Isolate* isolate_;
+
+ private:
+ void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data);
+ virtual void ProcessDebugEvent(v8::DebugEvent event,
+ Handle<JSObject> event_data,
+ Handle<JSObject> exec_state) = 0;
+};
+
+class JavaScriptDebugDelegate : public LegacyDebugDelegate {
+ public:
+ JavaScriptDebugDelegate(Isolate* isolate, Handle<JSFunction> listener,
+ Handle<Object> data);
+ virtual ~JavaScriptDebugDelegate();
+
+ private:
+ void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
+ Handle<JSObject> exec_state) override;
+
+ Handle<JSFunction> listener_;
+ Handle<Object> data_;
+};
+
+class NativeDebugDelegate : public LegacyDebugDelegate {
+ public:
+ NativeDebugDelegate(Isolate* isolate, v8::Debug::EventCallback callback,
+ Handle<Object> data);
+ virtual ~NativeDebugDelegate();
+
+ private:
+ // Details of the debug event delivered to the debug event listener.
+ class EventDetails : public v8::Debug::EventDetails {
+ public:
+ EventDetails(DebugEvent event, Handle<JSObject> exec_state,
+ Handle<JSObject> event_data, Handle<Object> callback_data);
+ virtual DebugEvent GetEvent() const;
+ virtual v8::Local<v8::Object> GetExecutionState() const;
+ virtual v8::Local<v8::Object> GetEventData() const;
+ virtual v8::Local<v8::Context> GetEventContext() const;
+ virtual v8::Local<v8::Value> GetCallbackData() const;
+ virtual v8::Debug::ClientData* GetClientData() const { return nullptr; }
+ virtual v8::Isolate* GetIsolate() const;
+
+ private:
+ DebugEvent event_; // Debug event causing the break.
+ Handle<JSObject> exec_state_; // Current execution state.
+ Handle<JSObject> event_data_; // Data associated with the event.
+ Handle<Object> callback_data_; // User data passed with the callback
+ // when it was registered.
+ };
+
+ void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
+ Handle<JSObject> exec_state) override;
+
+ v8::Debug::EventCallback callback_;
+ Handle<Object> data_;
+};
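Taken together, these declarations form a small adapter hierarchy: LegacyDebugDelegate implements the new debug::DebugDelegate interface, rebuilds the legacy (event, exec_state, event_data) triple, and defers delivery to a single pure-virtual ProcessDebugEvent that the JavaScript and native flavors override. A stripped-down sketch of that shape (all names hypothetical):

    // Sketch only: new-style interface adapted to a legacy dispatch hook.
    struct NewStyleDelegate {
      virtual ~NewStyleDelegate() = default;
      virtual void BreakRequested() = 0;
    };

    class LegacyAdapter : public NewStyleDelegate {
     public:
      void BreakRequested() final {
        Dispatch(/*legacy_event=*/1);  // translate to the legacy shape
      }

     private:
      virtual void Dispatch(int legacy_event) = 0;  // JS or native subclass
    };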
// This scope is used to load and enter the debug context and create a new
// break state. Leaving the scope will restore the previous state.
@@ -785,32 +695,39 @@ class DebugScope BASE_EMBEDDED {
DebugScope* prev_; // Previous scope if entered recursively.
StackFrame::Id break_frame_id_; // Previous break frame id.
int break_id_; // Previous break id.
- Handle<Object> return_value_; // Previous result.
bool failed_; // Did the debug context fail to load?
SaveContext save_; // Saves previous context.
PostponeInterruptsScope no_termination_exceptons_;
};
+// This scope is used to handle return values in nested debug breaks: it
+// restores the return value to its previous state when the inner break
+// exits. It is kept separate from DebugScope because return_value_ is not
+// cleared when a DebugScope is used.
+class ReturnValueScope {
+ public:
+ explicit ReturnValueScope(Debug* debug);
+ ~ReturnValueScope();
+
+ private:
+ Debug* debug_;
+ Handle<Object> return_value_; // Previous result.
+};
// Stack allocated class for disabling break.
class DisableBreak BASE_EMBEDDED {
public:
explicit DisableBreak(Debug* debug)
- : debug_(debug),
- previous_break_disabled_(debug->break_disabled_),
- previous_in_debug_event_listener_(debug->in_debug_event_listener_) {
+ : debug_(debug), previous_break_disabled_(debug->break_disabled_) {
debug_->break_disabled_ = true;
- debug_->in_debug_event_listener_ = true;
}
~DisableBreak() {
debug_->break_disabled_ = previous_break_disabled_;
- debug_->in_debug_event_listener_ = previous_in_debug_event_listener_;
}
private:
Debug* debug_;
bool previous_break_disabled_;
- bool previous_in_debug_event_listener_;
DISALLOW_COPY_AND_ASSIGN(DisableBreak);
};
@@ -858,14 +775,14 @@ class DebugCodegen : public AllStatic {
static void GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode);
- // FrameDropper is a code replacement for a JavaScript frame with possibly
- // several frames above.
- // There is no calling conventions here, because it never actually gets
- // called, it only gets returned to.
- static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
+ static void GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode);
+ // Builtin to drop frames to restart function.
+ static void GenerateFrameDropperTrampoline(MacroAssembler* masm);
- static void GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode);
+ // Builtin to atomically (wrt deopts) handle debugger statement and
+ // drop frames to restart function if necessary.
+ static void GenerateHandleDebuggerStatement(MacroAssembler* masm);
static void PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code);
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index 512bedb69f..6993274f09 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -12,21 +12,11 @@ var FrameMirror = global.FrameMirror;
var GlobalArray = global.Array;
var GlobalRegExp = global.RegExp;
var IsNaN = global.isNaN;
-var JSONParse = global.JSON.parse;
-var JSONStringify = global.JSON.stringify;
-var LookupMirror = global.LookupMirror;
var MakeMirror = global.MakeMirror;
-var MakeMirrorSerializer = global.MakeMirrorSerializer;
var MathMin = global.Math.min;
var Mirror = global.Mirror;
-var MirrorType;
-var ParseInt = global.parseInt;
var ValueMirror = global.ValueMirror;
-utils.Import(function(from) {
- MirrorType = from.MirrorType;
-});
-
//----------------------------------------------------------------------------
// Default number of frames to include in the response to backtrace request.
@@ -54,8 +44,7 @@ Debug.ExceptionBreak = { Caught : 0,
// The different types of steps.
Debug.StepAction = { StepOut: 0,
StepNext: 1,
- StepIn: 2,
- StepFrame: 3 };
+ StepIn: 2 };
// The different types of scripts matching enum ScriptType in objects.h.
Debug.ScriptType = { Native: 0,
@@ -831,8 +820,7 @@ function ExecutionState(break_id) {
ExecutionState.prototype.prepareStep = function(action) {
if (action === Debug.StepAction.StepIn ||
action === Debug.StepAction.StepOut ||
- action === Debug.StepAction.StepNext ||
- action === Debug.StepAction.StepFrame) {
+ action === Debug.StepAction.StepNext) {
return %PrepareStep(this.break_id, action);
}
throw %make_type_error(kDebuggerType);
@@ -867,11 +855,6 @@ ExecutionState.prototype.selectedFrame = function() {
return this.selected_frame;
};
-ExecutionState.prototype.debugCommandProcessor = function(opt_is_running) {
- return new DebugCommandProcessor(this, opt_is_running);
-};
-
-
function MakeBreakEvent(break_id, break_points_hit) {
return new BreakEvent(break_id, break_points_hit);
}
@@ -913,43 +896,6 @@ BreakEvent.prototype.breakPointsHit = function() {
};
-BreakEvent.prototype.toJSONProtocol = function() {
- var o = { seq: next_response_seq++,
- type: "event",
- event: "break",
- body: { invocationText: this.frame_.invocationText() }
- };
-
- // Add script related information to the event if available.
- var script = this.func().script();
- if (script) {
-    o.body.sourceLine = this.sourceLine();
-    o.body.sourceColumn = this.sourceColumn();
-    o.body.sourceLineText = this.sourceLineText();
- o.body.script = MakeScriptObject_(script, false);
- }
-
- // Add an Array of break points hit if any.
- if (this.breakPointsHit()) {
- o.body.breakpoints = [];
- for (var i = 0; i < this.breakPointsHit().length; i++) {
- // Find the break point number. For break points originating from a
- // script break point supply the script break point number.
- var breakpoint = this.breakPointsHit()[i];
- var script_break_point = breakpoint.script_break_point();
- var number;
- if (script_break_point) {
- number = script_break_point.number();
- } else {
- number = breakpoint.number();
- }
- o.body.breakpoints.push(number);
- }
- }
- return JSONStringify(ObjectToProtocolObject_(o));
-};
-
-
function MakeExceptionEvent(break_id, exception, uncaught, promise) {
return new ExceptionEvent(break_id, exception, uncaught, promise);
}
@@ -1003,32 +949,6 @@ ExceptionEvent.prototype.sourceLineText = function() {
};
-ExceptionEvent.prototype.toJSONProtocol = function() {
- var o = new ProtocolMessage();
- o.event = "exception";
- o.body = { uncaught: this.uncaught_,
- exception: MakeMirror(this.exception_)
- };
-
-  // Exceptions might happen without any JavaScript frames.
- if (this.exec_state_.frameCount() > 0) {
- o.body.sourceLine = this.sourceLine();
- o.body.sourceColumn = this.sourceColumn();
- o.body.sourceLineText = this.sourceLineText();
-
- // Add script information to the event if available.
- var script = this.func().script();
- if (script) {
- o.body.script = MakeScriptObject_(script, false);
- }
- } else {
- o.body.sourceLine = -1;
- }
-
- return o.toJSONProtocol();
-};
-
-
function MakeCompileEvent(script, type) {
return new CompileEvent(script, type);
}
@@ -1050,27 +970,6 @@ CompileEvent.prototype.script = function() {
};
-CompileEvent.prototype.toJSONProtocol = function() {
- var o = new ProtocolMessage();
- o.running = true;
- switch (this.type_) {
- case Debug.DebugEvent.BeforeCompile:
- o.event = "beforeCompile";
- break;
- case Debug.DebugEvent.AfterCompile:
- o.event = "afterCompile";
- break;
- case Debug.DebugEvent.CompileError:
- o.event = "compileError";
- break;
- }
- o.body = {};
- o.body.script = this.script_;
-
- return o.toJSONProtocol();
-};
-
-
function MakeScriptObject_(script, include_source) {
var o = { id: script.id(),
name: script.name(),
@@ -1108,1273 +1007,11 @@ AsyncTaskEvent.prototype.id = function() {
return this.id_;
}
-
-function DebugCommandProcessor(exec_state, opt_is_running) {
- this.exec_state_ = exec_state;
- this.running_ = opt_is_running || false;
-}
-
-
-DebugCommandProcessor.prototype.processDebugRequest = function (request) {
- return this.processDebugJSONRequest(request);
-};
-
-
-function ProtocolMessage(request) {
- // Update sequence number.
- this.seq = next_response_seq++;
-
- if (request) {
- // If message is based on a request this is a response. Fill the initial
- // response from the request.
- this.type = 'response';
- this.request_seq = request.seq;
- this.command = request.command;
- } else {
-    // If message is not based on a request it is a debugger-generated event.
- this.type = 'event';
- }
- this.success = true;
- // Handler may set this field to control debugger state.
- this.running = UNDEFINED;
-}
-
-
-ProtocolMessage.prototype.setOption = function(name, value) {
- if (!this.options_) {
- this.options_ = {};
- }
- this.options_[name] = value;
-};
-
-
-ProtocolMessage.prototype.failed = function(message, opt_details) {
- this.success = false;
- this.message = message;
- if (IS_OBJECT(opt_details)) {
- this.error_details = opt_details;
- }
-};
-
-
-ProtocolMessage.prototype.toJSONProtocol = function() {
- // Encode the protocol header.
- var json = {};
-  json.seq = this.seq;
- if (this.request_seq) {
- json.request_seq = this.request_seq;
- }
- json.type = this.type;
- if (this.event) {
- json.event = this.event;
- }
- if (this.command) {
- json.command = this.command;
- }
- if (this.success) {
- json.success = this.success;
- } else {
- json.success = false;
- }
- if (this.body) {
- // Encode the body part.
- var bodyJson;
- var serializer = MakeMirrorSerializer(true, this.options_);
- if (this.body instanceof Mirror) {
- bodyJson = serializer.serializeValue(this.body);
- } else if (this.body instanceof GlobalArray) {
- bodyJson = [];
- for (var i = 0; i < this.body.length; i++) {
- if (this.body[i] instanceof Mirror) {
- bodyJson.push(serializer.serializeValue(this.body[i]));
- } else {
- bodyJson.push(ObjectToProtocolObject_(this.body[i], serializer));
- }
- }
- } else {
- bodyJson = ObjectToProtocolObject_(this.body, serializer);
- }
- json.body = bodyJson;
- json.refs = serializer.serializeReferencedObjects();
- }
- if (this.message) {
- json.message = this.message;
- }
- if (this.error_details) {
- json.error_details = this.error_details;
- }
- json.running = this.running;
- return JSONStringify(json);
-};
-
-
-DebugCommandProcessor.prototype.createResponse = function(request) {
- return new ProtocolMessage(request);
-};
-
-
-DebugCommandProcessor.prototype.processDebugJSONRequest = function(
- json_request) {
- var request; // Current request.
- var response; // Generated response.
- try {
- try {
- // Convert the JSON string to an object.
- request = JSONParse(json_request);
-
- // Create an initial response.
- response = this.createResponse(request);
-
- if (!request.type) {
- throw %make_error(kDebugger, 'Type not specified');
- }
-
- if (request.type != 'request') {
- throw %make_error(kDebugger,
- "Illegal type '" + request.type + "' in request");
- }
-
- if (!request.command) {
- throw %make_error(kDebugger, 'Command not specified');
- }
-
- if (request.arguments) {
- var args = request.arguments;
- // TODO(yurys): remove request.arguments.compactFormat check once
- // ChromeDevTools are switched to 'inlineRefs'
- if (args.inlineRefs || args.compactFormat) {
- response.setOption('inlineRefs', true);
- }
- if (!IS_UNDEFINED(args.maxStringLength)) {
- response.setOption('maxStringLength', args.maxStringLength);
- }
- }
-
- var key = request.command.toLowerCase();
- var handler = DebugCommandProcessor.prototype.dispatch_[key];
- if (IS_FUNCTION(handler)) {
- %_Call(handler, this, request, response);
- } else {
- throw %make_error(kDebugger,
- 'Unknown command "' + request.command + '" in request');
- }
- } catch (e) {
- // If there is no response object created one (without command).
- if (!response) {
- response = this.createResponse();
- }
- response.success = false;
- response.message = TO_STRING(e);
- }
-
- // Return the response as a JSON encoded string.
- try {
- if (!IS_UNDEFINED(response.running)) {
- // Response controls running state.
- this.running_ = response.running;
- }
- response.running = this.running_;
- return response.toJSONProtocol();
- } catch (e) {
- // Failed to generate response - return generic error.
- return '{"seq":' + response.seq + ',' +
- '"request_seq":' + request.seq + ',' +
- '"type":"response",' +
- '"success":false,' +
- '"message":"Internal error: ' + TO_STRING(e) + '"}';
- }
- } catch (e) {
- // Failed in one of the catch blocks above - most generic error.
- return '{"seq":0,"type":"response","success":false,"message":"Internal error"}';
- }
-};
-
-
-DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
- // Check for arguments for continue.
- if (request.arguments) {
- var action = Debug.StepAction.StepIn;
-
- // Pull out arguments.
- var stepaction = request.arguments.stepaction;
-
- // Get the stepaction argument.
- if (stepaction) {
- if (stepaction == 'in') {
- action = Debug.StepAction.StepIn;
- } else if (stepaction == 'next') {
- action = Debug.StepAction.StepNext;
- } else if (stepaction == 'out') {
- action = Debug.StepAction.StepOut;
- } else {
- throw %make_error(kDebugger,
- 'Invalid stepaction argument "' + stepaction + '".');
- }
- }
-
- // Set up the VM for stepping.
- this.exec_state_.prepareStep(action);
- }
-
- // VM should be running after executing this request.
- response.running = true;
-};
-
-
-DebugCommandProcessor.prototype.breakRequest_ = function(request, response) {
- // Ignore as break command does not do anything when broken.
-};
-
-
-DebugCommandProcessor.prototype.setBreakPointRequest_ =
- function(request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var type = request.arguments.type;
- var target = request.arguments.target;
- var line = request.arguments.line;
- var column = request.arguments.column;
- var enabled = IS_UNDEFINED(request.arguments.enabled) ?
- true : request.arguments.enabled;
- var condition = request.arguments.condition;
- var groupId = request.arguments.groupId;
-
- // Check for legal arguments.
- if (!type || IS_UNDEFINED(target)) {
- response.failed('Missing argument "type" or "target"');
- return;
- }
-
- // Either function or script break point.
- var break_point_number;
- if (type == 'function') {
- // Handle function break point.
- if (!IS_STRING(target)) {
- response.failed('Argument "target" is not a string value');
- return;
- }
- var f;
- try {
- // Find the function through a global evaluate.
- f = this.exec_state_.evaluateGlobal(target).value();
- } catch (e) {
- response.failed('Error: "' + TO_STRING(e) +
- '" evaluating "' + target + '"');
- return;
- }
- if (!IS_FUNCTION(f)) {
- response.failed('"' + target + '" does not evaluate to a function');
- return;
- }
-
- // Set function break point.
- break_point_number = Debug.setBreakPoint(f, line, column, condition);
- } else if (type == 'handle') {
- // Find the object pointed by the specified handle.
- var handle = ParseInt(target, 10);
- var mirror = LookupMirror(handle);
- if (!mirror) {
- return response.failed('Object #' + handle + '# not found');
- }
- if (!mirror.isFunction()) {
- return response.failed('Object #' + handle + '# is not a function');
- }
-
- // Set function break point.
- break_point_number = Debug.setBreakPoint(mirror.value(),
- line, column, condition);
- } else if (type == 'script') {
- // set script break point.
- break_point_number =
- Debug.setScriptBreakPointByName(target, line, column, condition,
- groupId);
- } else if (type == 'scriptId') {
- break_point_number =
- Debug.setScriptBreakPointById(target, line, column, condition, groupId);
- } else if (type == 'scriptRegExp') {
- break_point_number =
- Debug.setScriptBreakPointByRegExp(target, line, column, condition,
- groupId);
- } else {
- response.failed('Illegal type "' + type + '"');
- return;
- }
-
- // Set additional break point properties.
- var break_point = Debug.findBreakPoint(break_point_number);
- if (!enabled) {
- Debug.disableBreakPoint(break_point_number);
- }
-
- // Add the break point number to the response.
- response.body = { type: type,
- breakpoint: break_point_number };
-
- // Add break point information to the response.
- if (break_point instanceof ScriptBreakPoint) {
- if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
- response.body.type = 'scriptId';
- response.body.script_id = break_point.script_id();
- } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
- response.body.type = 'scriptName';
- response.body.script_name = break_point.script_name();
- } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
- response.body.type = 'scriptRegExp';
- response.body.script_regexp = break_point.script_regexp_object().source;
- } else {
- throw %make_error(kDebugger,
- "Unexpected breakpoint type: " + break_point.type());
- }
- response.body.line = break_point.line();
- response.body.column = break_point.column();
- response.body.actual_locations = break_point.actual_locations();
- } else {
- response.body.type = 'function';
- response.body.actual_locations = [break_point.actual_location];
- }
-};
-
-
-DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
- request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var break_point = TO_NUMBER(request.arguments.breakpoint);
- var enabled = request.arguments.enabled;
- var condition = request.arguments.condition;
-
- // Check for legal arguments.
- if (!break_point) {
- response.failed('Missing argument "breakpoint"');
- return;
- }
-
- // Change enabled state if supplied.
- if (!IS_UNDEFINED(enabled)) {
- if (enabled) {
- Debug.enableBreakPoint(break_point);
- } else {
- Debug.disableBreakPoint(break_point);
- }
- }
-
- // Change condition if supplied
- if (!IS_UNDEFINED(condition)) {
- Debug.changeBreakPointCondition(break_point, condition);
- }
-};
-
-
-DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(
- request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var group_id = request.arguments.groupId;
-
- // Check for legal arguments.
- if (!group_id) {
- response.failed('Missing argument "groupId"');
- return;
- }
-
- var cleared_break_points = [];
- var new_script_break_points = [];
- for (var i = 0; i < script_break_points.length; i++) {
- var next_break_point = script_break_points[i];
- if (next_break_point.groupId() == group_id) {
- cleared_break_points.push(next_break_point.number());
- next_break_point.clear();
- } else {
- new_script_break_points.push(next_break_point);
- }
- }
- script_break_points = new_script_break_points;
-
- // Add the cleared break point numbers to the response.
- response.body = { breakpoints: cleared_break_points };
-};
-
-
-DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(
- request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var break_point = TO_NUMBER(request.arguments.breakpoint);
-
- // Check for legal arguments.
- if (!break_point) {
- response.failed('Missing argument "breakpoint"');
- return;
- }
-
- // Clear break point.
- Debug.clearBreakPoint(break_point);
-
- // Add the cleared break point number to the response.
- response.body = { breakpoint: break_point };
-};
-
-
-DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(
- request, response) {
- var array = [];
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
-
- var description = {
- number: break_point.number(),
- line: break_point.line(),
- column: break_point.column(),
- groupId: break_point.groupId(),
- active: break_point.active(),
- condition: break_point.condition(),
- actual_locations: break_point.actual_locations()
- };
-
- if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
- description.type = 'scriptId';
- description.script_id = break_point.script_id();
- } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
- description.type = 'scriptName';
- description.script_name = break_point.script_name();
- } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
- description.type = 'scriptRegExp';
- description.script_regexp = break_point.script_regexp_object().source;
- } else {
- throw %make_error(kDebugger,
- "Unexpected breakpoint type: " + break_point.type());
- }
- array.push(description);
- }
-
- response.body = {
- breakpoints: array,
- breakOnExceptions: Debug.isBreakOnException(),
- breakOnUncaughtExceptions: Debug.isBreakOnUncaughtException()
- };
-};
-
-
-DebugCommandProcessor.prototype.disconnectRequest_ =
- function(request, response) {
- Debug.disableAllBreakPoints();
- this.continueRequest_(request, response);
-};
-
-
-DebugCommandProcessor.prototype.setExceptionBreakRequest_ =
- function(request, response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out and check the 'type' argument:
- var type = request.arguments.type;
- if (!type) {
- response.failed('Missing argument "type"');
- return;
- }
-
- // Initialize the default value of enable:
- var enabled;
- if (type == 'all') {
- enabled = !Debug.isBreakOnException();
- } else if (type == 'uncaught') {
- enabled = !Debug.isBreakOnUncaughtException();
- }
-
- // Pull out and check the 'enabled' argument if present:
- if (!IS_UNDEFINED(request.arguments.enabled)) {
- enabled = request.arguments.enabled;
- if ((enabled != true) && (enabled != false)) {
- response.failed('Illegal value for "enabled":"' + enabled + '"');
- }
- }
-
- // Now set the exception break state:
- if (type == 'all') {
- %ChangeBreakOnException(Debug.ExceptionBreak.Caught, enabled);
- } else if (type == 'uncaught') {
- %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, enabled);
- } else {
- response.failed('Unknown "type":"' + type + '"');
- }
-
- // Add the cleared break point number to the response.
- response.body = { 'type': type, 'enabled': enabled };
-};
-
-
-DebugCommandProcessor.prototype.backtraceRequest_ = function(
- request, response) {
- // Get the number of frames.
- var total_frames = this.exec_state_.frameCount();
-
- // Create simple response if there are no frames.
- if (total_frames == 0) {
- response.body = {
- totalFrames: total_frames
- };
- return;
- }
-
- // Default frame range to include in backtrace.
- var from_index = 0;
- var to_index = kDefaultBacktraceLength;
-
- // Get the range from the arguments.
- if (request.arguments) {
- if (request.arguments.fromFrame) {
- from_index = request.arguments.fromFrame;
- }
- if (request.arguments.toFrame) {
- to_index = request.arguments.toFrame;
- }
- if (request.arguments.bottom) {
- var tmp_index = total_frames - from_index;
- from_index = total_frames - to_index;
- to_index = tmp_index;
- }
- if (from_index < 0 || to_index < 0) {
- return response.failed('Invalid frame number');
- }
- }
-
- // Adjust the index.
- to_index = MathMin(total_frames, to_index);
-
- if (to_index <= from_index) {
- var error = 'Invalid frame range';
- return response.failed(error);
- }
-
- // Create the response body.
- var frames = [];
- for (var i = from_index; i < to_index; i++) {
- frames.push(this.exec_state_.frame(i));
- }
- response.body = {
- fromFrame: from_index,
- toFrame: to_index,
- totalFrames: total_frames,
- frames: frames
- };
-};
-
-
-DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
-  // No frames, no source.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No frames');
- }
-
- // With no arguments just keep the selected frame.
- if (request.arguments) {
- var index = request.arguments.number;
- if (index < 0 || this.exec_state_.frameCount() <= index) {
- return response.failed('Invalid frame number');
- }
-
- this.exec_state_.setSelectedFrame(request.arguments.number);
- }
- response.body = this.exec_state_.frame();
-};
-
-
-DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
- function(scope_description) {
- // Get the frame for which the scope or scopes are requested.
- // With no frameNumber argument use the currently selected frame.
- if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
- var frame_index = scope_description.frameNumber;
- if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
- throw %make_type_error(kDebuggerFrame);
- }
- return this.exec_state_.frame(frame_index);
- } else {
- return this.exec_state_.frame();
- }
-};
-
-
-// Gets scope host object from request. It is either a function
-// ('functionHandle' argument must be specified) or a stack frame
-// ('frameNumber' may be specified and the current frame is taken by default).
-DebugCommandProcessor.prototype.resolveScopeHolder_ =
- function(scope_description) {
- if (scope_description && "functionHandle" in scope_description) {
- if (!IS_NUMBER(scope_description.functionHandle)) {
- throw %make_error(kDebugger, 'Function handle must be a number');
- }
- var function_mirror = LookupMirror(scope_description.functionHandle);
- if (!function_mirror) {
- throw %make_error(kDebugger, 'Failed to find function object by handle');
- }
- if (!function_mirror.isFunction()) {
- throw %make_error(kDebugger,
- 'Value of non-function type is found by handle');
- }
- return function_mirror;
- } else {
-    // No frames, no scopes.
- if (this.exec_state_.frameCount() == 0) {
- throw %make_error(kDebugger, 'No scopes');
- }
-
- // Get the frame for which the scopes are requested.
- var frame = this.resolveFrameFromScopeDescription_(scope_description);
- return frame;
- }
-}
-
-
-DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
- var scope_holder = this.resolveScopeHolder_(request.arguments);
-
- // Fill all scopes for this frame or function.
- var total_scopes = scope_holder.scopeCount();
- var scopes = [];
- for (var i = 0; i < total_scopes; i++) {
- scopes.push(scope_holder.scope(i));
- }
- response.body = {
- fromScope: 0,
- toScope: total_scopes,
- totalScopes: total_scopes,
- scopes: scopes
- };
-};
-
-
-DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
- // Get the frame or function for which the scope is requested.
- var scope_holder = this.resolveScopeHolder_(request.arguments);
-
- // With no scope argument just return top scope.
- var scope_index = 0;
- if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
- scope_index = TO_NUMBER(request.arguments.number);
- if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) {
- return response.failed('Invalid scope number');
- }
- }
-
- response.body = scope_holder.scope(scope_index);
-};
-
-
-// Reads value from protocol description. Description may be in form of type
-// (for singletons), raw value (primitive types supported in JSON),
-// string value description plus type (for primitive values) or handle id.
-// Returns raw value or throws exception.
-DebugCommandProcessor.resolveValue_ = function(value_description) {
- if ("handle" in value_description) {
- var value_mirror = LookupMirror(value_description.handle);
- if (!value_mirror) {
- throw %make_error(kDebugger, "Failed to resolve value by handle, ' #" +
- value_description.handle + "# not found");
- }
- return value_mirror.value();
- } else if ("stringDescription" in value_description) {
- if (value_description.type == MirrorType.BOOLEAN_TYPE) {
- return TO_BOOLEAN(value_description.stringDescription);
- } else if (value_description.type == MirrorType.NUMBER_TYPE) {
- return TO_NUMBER(value_description.stringDescription);
- } else if (value_description.type == MirrorType.STRING_TYPE) {
- return TO_STRING(value_description.stringDescription);
- } else {
- throw %make_error(kDebugger, "Unknown type");
- }
- } else if ("value" in value_description) {
- return value_description.value;
- } else if (value_description.type == MirrorType.UNDEFINED_TYPE) {
- return UNDEFINED;
- } else if (value_description.type == MirrorType.NULL_TYPE) {
- return null;
- } else {
- throw %make_error(kDebugger, "Failed to parse value description");
- }
-};
-
-
-DebugCommandProcessor.prototype.setVariableValueRequest_ =
- function(request, response) {
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- if (IS_UNDEFINED(request.arguments.name)) {
- response.failed('Missing variable name');
- return;
- }
- var variable_name = request.arguments.name;
-
- var scope_description = request.arguments.scope;
-
- // Get the frame or function for which the scope is requested.
- var scope_holder = this.resolveScopeHolder_(scope_description);
-
- if (IS_UNDEFINED(scope_description.number)) {
- response.failed('Missing scope number');
- return;
- }
- var scope_index = TO_NUMBER(scope_description.number);
-
- var scope = scope_holder.scope(scope_index);
-
- var new_value =
- DebugCommandProcessor.resolveValue_(request.arguments.newValue);
-
- scope.setVariableValue(variable_name, new_value);
-
- var new_value_mirror = MakeMirror(new_value);
-
- response.body = {
- newValue: new_value_mirror
- };
-};
-
-
-DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var expression = request.arguments.expression;
- var frame = request.arguments.frame;
- var global = request.arguments.global;
-
- // The expression argument could be an integer so we convert it to a
- // string.
- try {
- expression = TO_STRING(expression);
- } catch(e) {
- return response.failed('Failed to convert expression argument to string');
- }
-
- // Check for legal arguments.
- if (!IS_UNDEFINED(frame) && global) {
- return response.failed('Arguments "frame" and "global" are exclusive');
- }
-
- // Global evaluate.
- if (global) {
- // Evaluate in the native context.
- response.body = this.exec_state_.evaluateGlobal(expression);
- return;
- }
-
- // No frames, no evaluating in a frame.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No frames');
- }
-
- // Check whether a frame was specified.
- if (!IS_UNDEFINED(frame)) {
- var frame_number = TO_NUMBER(frame);
- if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
- return response.failed('Invalid frame "' + frame + '"');
- }
- // Evaluate in the specified frame.
- response.body = this.exec_state_.frame(frame_number).evaluate(expression);
- return;
- } else {
- // Evaluate in the selected frame.
- response.body = this.exec_state_.frame().evaluate(expression);
- return;
- }
-};
-
-
-DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var handles = request.arguments.handles;
-
- // Check for legal arguments.
- if (IS_UNDEFINED(handles)) {
- return response.failed('Argument "handles" missing');
- }
-
- // Set 'includeSource' option for script lookup.
- if (!IS_UNDEFINED(request.arguments.includeSource)) {
- var includeSource = TO_BOOLEAN(request.arguments.includeSource);
- response.setOption('includeSource', includeSource);
- }
-
- // Lookup handles.
- var mirrors = {};
- for (var i = 0; i < handles.length; i++) {
- var handle = handles[i];
- var mirror = LookupMirror(handle);
- if (!mirror) {
- return response.failed('Object #' + handle + '# not found');
- }
- mirrors[handle] = mirror;
- }
- response.body = mirrors;
-};
-
-
-DebugCommandProcessor.prototype.referencesRequest_ =
- function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var type = request.arguments.type;
- var handle = request.arguments.handle;
-
- // Check for legal arguments.
- if (IS_UNDEFINED(type)) {
- return response.failed('Argument "type" missing');
- }
- if (IS_UNDEFINED(handle)) {
- return response.failed('Argument "handle" missing');
- }
- if (type != 'referencedBy' && type != 'constructedBy') {
- return response.failed('Invalid type "' + type + '"');
- }
-
- // Look up the handle and return the objects that reference it.
- var mirror = LookupMirror(handle);
- if (mirror) {
- if (type == 'referencedBy') {
- response.body = mirror.referencedBy();
- } else {
- response.body = mirror.constructedBy();
- }
- } else {
- return response.failed('Object #' + handle + '# not found');
- }
-};
-
-
-DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
- // No frames, no source.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No source');
- }
-
- var from_line;
- var to_line;
- var frame = this.exec_state_.frame();
- if (request.arguments) {
- // Pull out arguments.
- from_line = request.arguments.fromLine;
- to_line = request.arguments.toLine;
-
- if (!IS_UNDEFINED(request.arguments.frame)) {
- var frame_number = TO_NUMBER(request.arguments.frame);
- if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
- return response.failed('Invalid frame "' + request.arguments.frame + '"');
- }
- frame = this.exec_state_.frame(frame_number);
- }
- }
-
- // Get the script selected.
- var script = frame.func().script();
- if (!script) {
- return response.failed('No source');
- }
-
- var raw_script = script.value();
-
- // Sanitize arguments and remove line offset.
- var line_offset = raw_script.line_offset;
- var line_count = %ScriptLineCount(raw_script);
- from_line = IS_UNDEFINED(from_line) ? 0 : from_line - line_offset;
- to_line = IS_UNDEFINED(to_line) ? line_count : to_line - line_offset;
-
- if (from_line < 0) from_line = 0;
- if (to_line > line_count) to_line = line_count;
-
- if (from_line >= line_count || to_line < 0 || from_line > to_line) {
- return response.failed('Invalid line interval');
- }
-
- // Fill in the response.
-
- response.body = {};
- response.body.fromLine = from_line + line_offset;
- response.body.toLine = to_line + line_offset;
- response.body.fromPosition = %ScriptLineStartPosition(raw_script, from_line);
- response.body.toPosition =
- (to_line == 0) ? 0 : %ScriptLineEndPosition(raw_script, to_line - 1);
- response.body.totalLines = %ScriptLineCount(raw_script);
-
- response.body.source = %_SubString(raw_script.source,
- response.body.fromPosition,
- response.body.toPosition);
-};
-
-
-DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
- var types = ScriptTypeFlag(Debug.ScriptType.Normal);
- var includeSource = false;
- var idsToInclude = null;
- if (request.arguments) {
- // Pull out arguments.
- if (!IS_UNDEFINED(request.arguments.types)) {
- types = TO_NUMBER(request.arguments.types);
- if (IsNaN(types) || types < 0) {
- return response.failed('Invalid types "' +
- request.arguments.types + '"');
- }
- }
-
- if (!IS_UNDEFINED(request.arguments.includeSource)) {
- includeSource = TO_BOOLEAN(request.arguments.includeSource);
- response.setOption('includeSource', includeSource);
- }
-
- if (IS_ARRAY(request.arguments.ids)) {
- idsToInclude = {};
- var ids = request.arguments.ids;
- for (var i = 0; i < ids.length; i++) {
- idsToInclude[ids[i]] = true;
- }
- }
-
- var filterStr = null;
- var filterNum = null;
- if (!IS_UNDEFINED(request.arguments.filter)) {
- var num = TO_NUMBER(request.arguments.filter);
- if (!IsNaN(num)) {
- filterNum = num;
- }
- filterStr = request.arguments.filter;
- }
- }
-
- // Collect all scripts in the heap.
- var scripts = Debug.scripts();
-
- response.body = [];
-
- for (var i = 0; i < scripts.length; i++) {
- if (idsToInclude && !idsToInclude[scripts[i].id]) {
- continue;
- }
- if (filterStr || filterNum) {
- var script = scripts[i];
- var found = false;
- if (filterNum && !found) {
- if (script.id && script.id === filterNum) {
- found = true;
- }
- }
- if (filterStr && !found) {
- if (script.name && script.name.indexOf(filterStr) >= 0) {
- found = true;
- }
- }
- if (!found) continue;
- }
- if (types & ScriptTypeFlag(scripts[i].type)) {
- response.body.push(MakeMirror(scripts[i]));
- }
- }
-};
-
-
-DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
- response.running = false;
-};
-
-
-// TODO(5510): remove this.
-DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
- response.body = {
- V8Version: %GetV8Version()
- };
-};
-
-
-DebugCommandProcessor.prototype.changeLiveRequest_ = function(
- request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
- var script_id = request.arguments.script_id;
- var preview_only = !!request.arguments.preview_only;
-
- var the_script = scriptById(script_id);
- if (!the_script) {
- response.failed('Script not found');
- return;
- }
-
- var change_log = new GlobalArray();
-
- if (!IS_STRING(request.arguments.new_source)) {
- throw "new_source argument expected";
- }
-
- var new_source = request.arguments.new_source;
-
- var result_description;
- try {
- result_description = Debug.LiveEdit.SetScriptSource(the_script,
- new_source, preview_only, change_log);
- } catch (e) {
- if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
- response.failed(e.message, e.details);
- return;
- }
- throw e;
- }
- response.body = {change_log: change_log, result: result_description};
-
- if (!preview_only && !this.running_ && result_description.stack_modified) {
- response.body.stepin_recommended = true;
- }
-};
-
-
-DebugCommandProcessor.prototype.restartFrameRequest_ = function(
- request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
- var frame = request.arguments.frame;
-
- // No frames, nothing to restart.
- if (this.exec_state_.frameCount() == 0) {
- return response.failed('No frames');
- }
-
- var frame_mirror;
- // Check whether a frame was specified.
- if (!IS_UNDEFINED(frame)) {
- var frame_number = TO_NUMBER(frame);
- if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
- return response.failed('Invalid frame "' + frame + '"');
- }
- // Restart specified frame.
- frame_mirror = this.exec_state_.frame(frame_number);
- } else {
- // Restart selected frame.
- frame_mirror = this.exec_state_.frame();
- }
-
- var result_description = frame_mirror.restart();
- response.body = {result: result_description};
-};
-
-
-DebugCommandProcessor.prototype.debuggerFlagsRequest_ = function(request,
- response) {
- // Check for legal request.
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- // Pull out arguments.
- var flags = request.arguments.flags;
-
- response.body = { flags: [] };
- if (!IS_UNDEFINED(flags)) {
- for (var i = 0; i < flags.length; i++) {
- var name = flags[i].name;
- var debugger_flag = debugger_flags[name];
- if (!debugger_flag) {
- continue;
- }
- if ('value' in flags[i]) {
- debugger_flag.setValue(flags[i].value);
- }
- response.body.flags.push({ name: name, value: debugger_flag.getValue() });
- }
- } else {
- for (var name in debugger_flags) {
- var value = debugger_flags[name].getValue();
- response.body.flags.push({ name: name, value: value });
- }
- }
-};
-
-
-DebugCommandProcessor.prototype.v8FlagsRequest_ = function(request, response) {
- var flags = request.arguments.flags;
- if (!flags) flags = '';
- %SetFlags(flags);
-};
-
-
-DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
- var type = request.arguments.type;
- if (!type) type = 'all';
-
- var before = %GetHeapUsage();
- %CollectGarbage(type);
- var after = %GetHeapUsage();
-
- response.body = { "before": before, "after": after };
-};
-
-
-DebugCommandProcessor.prototype.dispatch_ = (function() {
- var proto = DebugCommandProcessor.prototype;
- return {
- "continue": proto.continueRequest_,
- "break" : proto.breakRequest_,
- "setbreakpoint" : proto.setBreakPointRequest_,
- "changebreakpoint": proto.changeBreakPointRequest_,
- "clearbreakpoint": proto.clearBreakPointRequest_,
- "clearbreakpointgroup": proto.clearBreakPointGroupRequest_,
- "disconnect": proto.disconnectRequest_,
- "setexceptionbreak": proto.setExceptionBreakRequest_,
- "listbreakpoints": proto.listBreakpointsRequest_,
- "backtrace": proto.backtraceRequest_,
- "frame": proto.frameRequest_,
- "scopes": proto.scopesRequest_,
- "scope": proto.scopeRequest_,
- "setvariablevalue": proto.setVariableValueRequest_,
- "evaluate": proto.evaluateRequest_,
- "lookup": proto.lookupRequest_,
- "references": proto.referencesRequest_,
- "source": proto.sourceRequest_,
- "scripts": proto.scriptsRequest_,
- "suspend": proto.suspendRequest_,
- "version": proto.versionRequest_,
- "changelive": proto.changeLiveRequest_,
- "restartframe": proto.restartFrameRequest_,
- "flags": proto.debuggerFlagsRequest_,
- "v8flag": proto.v8FlagsRequest_,
- "gc": proto.gcRequest_,
- };
-})();
-
-
-// Check whether the previously processed command caused the VM to resume
-// running.
-DebugCommandProcessor.prototype.isRunning = function() {
- return this.running_;
-};
-
-
-DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
- return %SystemBreak();
-};
-
-
-/**
- * Convert an Object to its debugger protocol representation. The representation
- * may be serialized to a JSON object using JSON.stringify().
- * This implementation simply runs through all string property names, converts
- * each property value to a protocol value and adds the property to the result
- * object. For type "object" the function will be called recursively. Note that
- * circular structures will cause infinite recursion.
- * @param {Object} object The object to format as protocol object.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- * mirror objects are encountered.
- * @return {Object} Protocol object value.
- */
-function ObjectToProtocolObject_(object, mirror_serializer) {
- var content = {};
- for (var key in object) {
- // Only consider string keys.
- if (typeof key == 'string') {
- // Format the value based on its type.
- var property_value_json = ValueToProtocolValue_(object[key],
- mirror_serializer);
- // Add the property if relevant.
- if (!IS_UNDEFINED(property_value_json)) {
- content[key] = property_value_json;
- }
- }
- }
-
- return content;
-}
-
-
-/**
- * Convert an array to its debugger protocol representation. It will convert
- * each array element to a protocol value.
- * @param {Array} array The array to format as protocol array.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- * mirror objects are encountered.
- * @return {Array} Protocol array value.
- */
-function ArrayToProtocolArray_(array, mirror_serializer) {
- var json = [];
- for (var i = 0; i < array.length; i++) {
- json.push(ValueToProtocolValue_(array[i], mirror_serializer));
- }
- return json;
-}
-
-
-/**
- * Convert a value to its debugger protocol representation.
- * @param {*} value The value to format as protocol value.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- * mirror objects are encountered.
- * @return {*} Protocol value.
- */
-function ValueToProtocolValue_(value, mirror_serializer) {
- // Format the value based on its type.
- var json;
- switch (typeof value) {
- case 'object':
- if (value instanceof Mirror) {
- json = mirror_serializer.serializeValue(value);
- } else if (IS_ARRAY(value)) {
- json = ArrayToProtocolArray_(value, mirror_serializer);
- } else {
- json = ObjectToProtocolObject_(value, mirror_serializer);
- }
- break;
-
- case 'boolean':
- case 'string':
- case 'number':
- json = value;
- break;
-
- default:
- json = null;
- }
- return json;
-}
-
-
// -------------------------------------------------------------------
// Exports
utils.InstallConstants(global, [
"Debug", Debug,
- "DebugCommandProcessor", DebugCommandProcessor,
"BreakEvent", BreakEvent,
"CompileEvent", CompileEvent,
"BreakPoint", BreakPoint,
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index 1e0ee750ca..0ce9874e84 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -64,12 +64,6 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Load padding words on stack.
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- }
- __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
-
// Push arguments for DebugBreak call.
if (mode == SAVE_RESULT_REGISTER) {
// Break on return.
@@ -96,54 +90,43 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
}
}
-
- __ pop(ebx);
- // We divide stored value by 2 (untagging) and multiply it by word's size.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ lea(esp, Operand(esp, ebx, times_half_pointer_size, 0));
-
// Get rid of the internal frame.
}
- // This call did not replace a call, so there will be an unwanted
- // return address left on the stack. Here we get rid of that.
- __ add(esp, Immediate(kPointerSize));
+ __ MaybeDropFrames();
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ jmp(Operand::StaticVariable(after_break_target));
+ // Return to caller.
+ __ ret(0);
}
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+ }
+ __ MaybeDropFrames();
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- // We do not know our frame height, but set esp based on ebp.
- __ lea(esp, Operand(ebp, FrameDropperFrameConstants::kFunctionOffset));
- __ pop(edi); // Function.
- __ add(esp, Immediate(-FrameDropperFrameConstants::kCodeOffset)); // INTERNAL
- // frame
- // marker
- // and code
- __ pop(ebp);
-
- ParameterCount dummy(0);
- __ CheckDebugHook(edi, no_reg, dummy, dummy);
-
- // Load context from the function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Return to caller.
+ __ ret(0);
+}
- // Clear new.target register as a safety measure.
- __ mov(edx, masm->isolate()->factory()->undefined_value());
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+ // Frame is being dropped:
+ // - Drop to the target frame specified by ebx.
+ // - Look up current function on the frame.
+ // - Leave the frame.
+ // - Restart the frame by calling the function.
+ __ mov(ebp, ebx);
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ leave();
- // Get function code.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+ __ mov(ebx,
+ FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
- // Re-run JSFunction, edi is function, esi is context.
- __ jmp(ebx);
+ ParameterCount dummy(ebx);
+ __ InvokeFunction(edi, dummy, dummy, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
}
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 2b7072ce2b..b86986dee4 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -9,6 +9,8 @@
#include <string>
#include <vector>
+#include "src/globals.h"
+
namespace v8 {
namespace debug {
@@ -16,7 +18,7 @@ namespace debug {
* Defines location inside script.
* Lines and columns are 0-based.
*/
-class Location {
+class V8_EXPORT_PRIVATE Location {
public:
Location(int line_number, int column_number);
/**
@@ -60,10 +62,10 @@ struct WasmDisassembly {
};
enum PromiseDebugActionType {
+ kDebugPromiseCreated,
kDebugEnqueueAsyncFunction,
kDebugEnqueuePromiseResolve,
kDebugEnqueuePromiseReject,
- kDebugEnqueuePromiseResolveThenableJob,
kDebugPromiseCollected,
kDebugWillHandle,
kDebugDidHandle,
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 75c5a805ec..fa70e77d1f 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -4,6 +4,7 @@
#include "src/debug/liveedit.h"
+#include "src/assembler-inl.h"
#include "src/ast/scopes.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
@@ -14,6 +15,7 @@
#include "src/global-handles.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/objects-inl.h"
#include "src/source-position-table.h"
#include "src/v8.h"
#include "src/v8memory.h"
@@ -604,15 +606,16 @@ static int GetArrayLength(Handle<JSArray> array) {
return Smi::cast(length)->value();
}
-void FunctionInfoWrapper::SetInitialProperties(
- Handle<String> name, int start_position, int end_position, int param_num,
- int literal_count, int parent_index, int function_literal_id) {
+void FunctionInfoWrapper::SetInitialProperties(Handle<String> name,
+ int start_position,
+ int end_position, int param_num,
+ int parent_index,
+ int function_literal_id) {
HandleScope scope(isolate());
this->SetField(kFunctionNameOffset_, name);
this->SetSmiValueField(kStartPositionOffset_, start_position);
this->SetSmiValueField(kEndPositionOffset_, end_position);
this->SetSmiValueField(kParamNumOffset_, param_num);
- this->SetSmiValueField(kLiteralNumOffset_, literal_count);
this->SetSmiValueField(kParentIndexOffset_, parent_index);
this->SetSmiValueField(kFunctionLiteralIdOffset_, function_literal_id);
}
@@ -652,33 +655,7 @@ Handle<SharedFunctionInfo> SharedInfoWrapper::GetInfo() {
void LiveEdit::InitializeThreadLocal(Debug* debug) {
- debug->thread_local_.frame_drop_mode_ = LIVE_EDIT_FRAMES_UNTOUCHED;
-}
-
-
-bool LiveEdit::SetAfterBreakTarget(Debug* debug) {
- Code* code = NULL;
- Isolate* isolate = debug->isolate_;
- switch (debug->thread_local_.frame_drop_mode_) {
- case LIVE_EDIT_FRAMES_UNTOUCHED:
- return false;
- case LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL:
- // Debug break slot stub does not return normally, instead it manually
- // cleans the stack and jumps. We should patch the jump address.
- code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit);
- break;
- case LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL:
- // Nothing to do, after_break_target is not used here.
- return true;
- case LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL:
- code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit);
- break;
- case LIVE_EDIT_CURRENTLY_SET_MODE:
- UNREACHABLE();
- break;
- }
- debug->after_break_target_ = code->entry();
- return true;
+ debug->thread_local_.restart_fp_ = 0;
}
@@ -745,47 +722,6 @@ MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script,
}
}
-
-// Visitor that finds all references to a particular code object,
-// including "CODE_TARGET" references in other code objects and replaces
-// them on the fly.
-class ReplacingVisitor : public ObjectVisitor {
- public:
- explicit ReplacingVisitor(Code* original, Code* substitution)
- : original_(original), substitution_(substitution) {
- }
-
- void VisitPointers(Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
- if (*p == original_) {
- *p = substitution_;
- }
- }
- }
-
- void VisitCodeEntry(Address entry) override {
- if (Code::GetObjectFromEntryAddress(entry) == original_) {
- Address substitution_entry = substitution_->instruction_start();
- Memory::Address_at(entry) = substitution_entry;
- }
- }
-
- void VisitCodeTarget(RelocInfo* rinfo) override {
- if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
- Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
- Address substitution_entry = substitution_->instruction_start();
- rinfo->set_target_address(substitution_entry);
- }
- }
-
- void VisitDebugTarget(RelocInfo* rinfo) override { VisitCodeTarget(rinfo); }
-
- private:
- Code* original_;
- Code* substitution_;
-};
-
-
// Finds all references to original and replaces them with substitution.
static void ReplaceCodeObject(Handle<Code> original,
Handle<Code> substitution) {
@@ -795,62 +731,42 @@ static void ReplaceCodeObject(Handle<Code> original,
// to code objects (that are never in new space) without worrying about
// write barriers.
Heap* heap = original->GetHeap();
- HeapIterator iterator(heap);
-
- DCHECK(!heap->InNewSpace(*substitution));
-
- ReplacingVisitor visitor(*original, *substitution);
-
- // Iterate over all roots. Stack frames may have pointer into original code,
- // so temporary replace the pointers with offset numbers
- // in prologue/epilogue.
- heap->IterateRoots(&visitor, VISIT_ALL);
-
+ HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
// Now iterate over all pointers of all objects, including code_target
// implicit pointers.
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- obj->Iterate(&visitor);
+ if (obj->IsJSFunction()) {
+ JSFunction* fun = JSFunction::cast(obj);
+ if (fun->code() == *original) fun->ReplaceCode(*substitution);
+ } else if (obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* info = SharedFunctionInfo::cast(obj);
+ if (info->code() == *original) info->set_code(*substitution);
+ }
}
}
-
-// Patch function literals.
-// Name 'literals' is a misnomer. Rather it's a cache for complex object
-// boilerplates and for a native context. We must clean cached values.
-// Additionally we may need to allocate a new array if the number of literals
-// changed.
-class LiteralFixer {
+// Patch function feedback vector.
+// The feedback vector is a cache for complex object boilerplates and for a
+// native context. We must clean cached values, or if the structure of the
+// vector itself changes we need to allocate a new one.
+class FeedbackVectorFixer {
public:
- static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper,
- Handle<SharedFunctionInfo> shared_info,
- bool feedback_metadata_changed, Isolate* isolate) {
- int new_literal_count = compile_info_wrapper->GetLiteralCount();
- int old_literal_count = shared_info->num_literals();
-
- if (old_literal_count == new_literal_count && !feedback_metadata_changed) {
- // If literal count didn't change, simply go over all functions
- // and clear literal arrays.
- ClearValuesVisitor visitor;
- IterateJSFunctions(shared_info, &visitor);
- } else {
- // When literal count changes, we have to create new array instances.
- // Since we cannot create instances when iterating heap, we should first
- // collect all functions and fix their literal arrays.
- Handle<FixedArray> function_instances =
- CollectJSFunctions(shared_info, isolate);
- Handle<FeedbackMetadata> feedback_metadata(
- shared_info->feedback_metadata());
-
- for (int i = 0; i < function_instances->length(); i++) {
- Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
- Handle<FeedbackVector> vector =
- FeedbackVector::New(isolate, feedback_metadata);
- Handle<LiteralsArray> new_literals =
- LiteralsArray::New(isolate, vector, new_literal_count);
- fun->set_literals(*new_literals);
- }
-
- shared_info->set_num_literals(new_literal_count);
+ static void PatchFeedbackVector(FunctionInfoWrapper* compile_info_wrapper,
+ Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate) {
+ // When feedback metadata changes, we have to create new array instances.
+ // Since we cannot create instances when iterating heap, we should first
+ // collect all functions and fix their literal arrays.
+ Handle<FixedArray> function_instances =
+ CollectJSFunctions(shared_info, isolate);
+
+ for (int i = 0; i < function_instances->length(); i++) {
+ Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
+ Handle<Cell> new_cell = isolate->factory()->NewManyClosuresCell(
+ isolate->factory()->undefined_value());
+ fun->set_feedback_vector_cell(*new_cell);
+ // Only create feedback vectors if we already have the metadata.
+ if (shared_info->is_compiled()) JSFunction::EnsureLiterals(fun);
}
}
@@ -889,17 +805,6 @@ class LiteralFixer {
return result;
}
- class ClearValuesVisitor {
- public:
- void visit(JSFunction* fun) {
- LiteralsArray* literals = fun->literals();
- int len = literals->literals_count();
- for (int j = 0; j < len; j++) {
- literals->set_literal_undefined(j);
- }
- }
- };
-
class CountVisitor {
public:
void visit(JSFunction* fun) {
@@ -972,7 +877,6 @@ void LiveEdit::ReplaceFunctionCode(
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
Handle<SharedFunctionInfo> new_shared_info =
compile_info_wrapper.GetSharedFunctionInfo();
- bool feedback_metadata_changed = false;
if (shared_info->is_compiled()) {
// Take whatever code we can get from the new shared function info. We
@@ -1019,9 +923,10 @@ void LiveEdit::ReplaceFunctionCode(
// Update the type feedback vector, if needed.
Handle<FeedbackMetadata> new_feedback_metadata(
new_shared_info->feedback_metadata());
- feedback_metadata_changed =
- new_feedback_metadata->DiffersFrom(shared_info->feedback_metadata());
shared_info->set_feedback_metadata(*new_feedback_metadata);
+ } else {
+ shared_info->set_feedback_metadata(
+ FeedbackMetadata::cast(isolate->heap()->empty_fixed_array()));
}
int start_position = compile_info_wrapper.GetStartPosition();
@@ -1029,8 +934,8 @@ void LiveEdit::ReplaceFunctionCode(
shared_info->set_start_position(start_position);
shared_info->set_end_position(end_position);
- LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info,
- feedback_metadata_changed, isolate);
+ FeedbackVectorFixer::PatchFeedbackVector(&compile_info_wrapper, shared_info,
+ isolate);
DeoptimizeDependentFunctions(*shared_info);
isolate->compilation_cache()->Remove(shared_info);
@@ -1277,185 +1182,6 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
return false;
}
-
-// Iterates over handler chain and removes all elements that are inside
-// frames being dropped.
-static bool FixTryCatchHandler(StackFrame* top_frame,
- StackFrame* bottom_frame) {
- Address* pointer_address =
- &Memory::Address_at(top_frame->isolate()->get_address_from_id(
- Isolate::kHandlerAddress));
-
- while (*pointer_address < top_frame->sp()) {
- pointer_address = &Memory::Address_at(*pointer_address);
- }
- Address* above_frame_address = pointer_address;
- while (*pointer_address < bottom_frame->fp()) {
- pointer_address = &Memory::Address_at(*pointer_address);
- }
- bool change = *above_frame_address != *pointer_address;
- *above_frame_address = *pointer_address;
- return change;
-}
-
-
-// Initializes an artificial stack frame. The data it contains is used for:
-// a. letting the frame dropper code, which eventually gets control, work
-// correctly,
-// b. staying compatible with the typed frame structure expected by various
-// stack iterators.
-// Frame structure (conforms to InternalFrame structure):
-// -- function
-// -- code
-// -- SMI marker
-// -- frame base
-static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
- DCHECK(bottom_js_frame->is_java_script());
- Address fp = bottom_js_frame->fp();
- Memory::Object_at(fp + FrameDropperFrameConstants::kFunctionOffset) =
- Memory::Object_at(fp + StandardFrameConstants::kFunctionOffset);
- Memory::Object_at(fp + FrameDropperFrameConstants::kFrameTypeOffset) =
- Smi::FromInt(StackFrame::INTERNAL);
- Memory::Object_at(fp + FrameDropperFrameConstants::kCodeOffset) = *code;
-}
-
-
-// Removes the specified range of frames from the stack. There may be one or
-// more frames in the range. In any case the bottom frame is restarted rather
-// than dropped, and therefore has to be a JavaScript frame.
-// Returns an error message or NULL.
-static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
- int bottom_js_frame_index,
- LiveEditFrameDropMode* mode) {
- if (!LiveEdit::kFrameDropperSupported) {
- return "Stack manipulations are not supported in this architecture.";
- }
-
- StackFrame* pre_top_frame = frames[top_frame_index - 1];
- StackFrame* top_frame = frames[top_frame_index];
- StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
-
- DCHECK(bottom_js_frame->is_java_script());
-
- // Check the nature of the top frame.
- Isolate* isolate = bottom_js_frame->isolate();
- Code* pre_top_frame_code = pre_top_frame->LookupCode();
- bool frame_has_padding = true;
- if (pre_top_frame_code ==
- isolate->builtins()->builtin(Builtins::kSlot_DebugBreak)) {
- // OK, we can drop debug break slot.
- *mode = LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
- } else if (pre_top_frame_code ==
- isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit)) {
- // OK, we can drop our own code.
- pre_top_frame = frames[top_frame_index - 2];
- top_frame = frames[top_frame_index - 1];
- *mode = LIVE_EDIT_CURRENTLY_SET_MODE;
- frame_has_padding = false;
- } else if (pre_top_frame_code ==
- isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
- *mode = LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL;
- } else if (pre_top_frame_code->kind() == Code::STUB &&
- CodeStub::GetMajorKey(pre_top_frame_code) == CodeStub::CEntry) {
- // Entry from our unit tests on 'debugger' statement.
- // It's fine, we support this case.
- *mode = LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL;
- // We don't have a padding from 'debugger' statement call.
- // Here the stub is CEntry, it's not debug-only and can't be padded.
- // If anyone would complain, a proxy padded stub could be added.
- frame_has_padding = false;
- } else if (pre_top_frame->type() == StackFrame::ARGUMENTS_ADAPTOR) {
- // This must be an adaptor that remains from a frame dropping that
- // is still on the stack. A frame dropper frame must be above it.
- DCHECK(frames[top_frame_index - 2]->LookupCode() ==
- isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit));
- pre_top_frame = frames[top_frame_index - 3];
- top_frame = frames[top_frame_index - 2];
- *mode = LIVE_EDIT_CURRENTLY_SET_MODE;
- frame_has_padding = false;
- } else if (pre_top_frame_code->kind() == Code::BYTECODE_HANDLER) {
- // Interpreted bytecode takes up two stack frames, one for the bytecode
- // handler and one for the interpreter entry trampoline. Therefore we shift
- // up by one frame.
- *mode = LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL;
- pre_top_frame = frames[top_frame_index - 2];
- top_frame = frames[top_frame_index - 1];
- } else {
- return "Unknown structure of stack above changing function";
- }
-
- Address unused_stack_top = top_frame->sp();
- Address unused_stack_bottom =
- bottom_js_frame->fp() - FrameDropperFrameConstants::kFixedFrameSize +
- 2 * kPointerSize; // Bigger address end is exclusive.
-
- Address* top_frame_pc_address = top_frame->pc_address();
-
- // top_frame may be damaged below this point. Do not use it.
- DCHECK(!(top_frame = NULL));
-
- if (unused_stack_top > unused_stack_bottom) {
- if (frame_has_padding) {
- int shortage_bytes =
- static_cast<int>(unused_stack_top - unused_stack_bottom);
-
- Address padding_start =
- pre_top_frame->fp() -
- (FrameDropperFrameConstants::kFixedFrameSize - kPointerSize);
-
- Address padding_pointer = padding_start;
- Smi* padding_object = Smi::FromInt(LiveEdit::kFramePaddingValue);
- while (Memory::Object_at(padding_pointer) == padding_object) {
- padding_pointer -= kPointerSize;
- }
- int padding_counter =
- Smi::cast(Memory::Object_at(padding_pointer))->value();
- if (padding_counter * kPointerSize < shortage_bytes) {
- return "Not enough space for frame dropper frame "
- "(even with padding frame)";
- }
- Memory::Object_at(padding_pointer) =
- Smi::FromInt(padding_counter - shortage_bytes / kPointerSize);
-
- StackFrame* pre_pre_frame = frames[top_frame_index - 2];
-
- MemMove(padding_start + kPointerSize - shortage_bytes,
- padding_start + kPointerSize,
- FrameDropperFrameConstants::kFixedFrameSize - kPointerSize);
-
- pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
- pre_pre_frame->SetCallerFp(pre_top_frame->fp());
- unused_stack_top -= shortage_bytes;
-
- STATIC_ASSERT(sizeof(Address) == kPointerSize);
- top_frame_pc_address -= shortage_bytes / kPointerSize;
- } else {
- return "Not enough space for frame dropper frame";
- }
- }
-
- // Committing now. After this point we should return only NULL value.
-
- FixTryCatchHandler(pre_top_frame, bottom_js_frame);
- // Make sure FixTryCatchHandler is idempotent.
- DCHECK(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
-
- Handle<Code> code = isolate->builtins()->FrameDropper_LiveEdit();
- *top_frame_pc_address = code->entry();
- pre_top_frame->SetCallerFp(bottom_js_frame->fp());
-
- SetUpFrameDropperFrame(bottom_js_frame, code);
-
- for (Address a = unused_stack_top;
- a < unused_stack_bottom;
- a += kPointerSize) {
- Memory::Object_at(a) = Smi::kZero;
- }
-
- return NULL;
-}
-
-
// Describes a set of call frames that execute any of the listed functions.
// Finding no such frames does not mean an error.
class MultipleFunctionTarget {
@@ -1543,7 +1269,6 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
Zone zone(isolate->allocator(), ZONE_NAME);
Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
-
int top_frame_index = -1;
int frame_index = 0;
for (; frame_index < frames.length(); frame_index++) {
@@ -1628,24 +1353,11 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
return target.GetNotFoundMessage();
}
- LiveEditFrameDropMode drop_mode = LIVE_EDIT_FRAMES_UNTOUCHED;
- const char* error_message =
- DropFrames(frames, top_frame_index, bottom_js_frame_index, &drop_mode);
-
- if (error_message != NULL) {
- return error_message;
+ if (!LiveEdit::kFrameDropperSupported) {
+ return "Stack manipulations are not supported in this architecture.";
}
- // Adjust break_frame after some frames has been dropped.
- StackFrame::Id new_id = StackFrame::NO_ID;
- for (int i = bottom_js_frame_index + 1; i < frames.length(); i++) {
- if (frames[i]->type() == StackFrame::JAVA_SCRIPT ||
- frames[i]->type() == StackFrame::INTERPRETED) {
- new_id = frames[i]->id();
- break;
- }
- }
- debug->FramesHaveBeenDropped(new_id, drop_mode);
+ debug->ScheduleFrameRestart(frames[bottom_js_frame_index]);
return NULL;
}
@@ -1691,7 +1403,7 @@ bool LiveEdit::FindActiveGenerators(Handle<FixedArray> shared_info_array,
FunctionPatchabilityStatus active = FUNCTION_BLOCKED_ACTIVE_GENERATOR;
Heap* heap = isolate->heap();
- HeapIterator iterator(heap);
+ HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
HeapObject* obj = NULL;
while ((obj = iterator.next()) != NULL) {
if (!obj->IsJSGeneratorObject()) continue;
@@ -1888,7 +1600,6 @@ void LiveEditFunctionTracker::FunctionStarted(FunctionLiteral* fun) {
FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate_);
info.SetInitialProperties(fun->name(), fun->start_position(),
fun->end_position(), fun->parameter_count(),
- fun->materialized_literal_count(),
current_parent_index_, fun->function_literal_id());
current_parent_index_ = len_;
SetElementSloppy(result_, len_, info.GetJSArray());
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index be70d2e50c..4ad1bc5097 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -74,8 +74,6 @@ class LiveEdit : AllStatic {
public:
static void InitializeThreadLocal(Debug* debug);
- static bool SetAfterBreakTarget(Debug* debug);
-
MUST_USE_RESULT static MaybeHandle<JSArray> GatherCompileInfo(
Handle<Script> script,
Handle<String> source);
@@ -146,40 +144,6 @@ class LiveEdit : AllStatic {
// Architecture-specific constant.
static const bool kFrameDropperSupported;
-
- /**
- * Defines layout of a stack frame that supports padding. This is a regular
- * internal frame that has a flexible stack structure. LiveEdit can shift
- * its lower part up the stack, taking up the 'padding' space when additional
- * stack memory is required.
- * Such a frame is expected immediately above the topmost JavaScript frame.
- *
- * Stack Layout:
- * --- Top
- * LiveEdit routine frames
- * ---
- * C frames of debug handler
- * ---
- * ...
- * ---
- * An internal frame that has n padding words:
- * - any number of words as needed by code -- upper part of frame
- * - padding size: a Smi storing n -- current size of padding
- * - padding: n words filled with kPaddingValue in form of Smi
- * - 3 context/type words of a regular InternalFrame
- * - fp
- * ---
- * Topmost JavaScript frame
- * ---
- * ...
- * --- Bottom
- */
- // A number of words that should be reserved on stack for the LiveEdit use.
- // Stored on stack in form of Smi.
- static const int kFramePaddingInitialSize = 1;
- // A value that padding words are filled with (in form of Smi). Going
- // bottom-top, the first word not having this value is a counter word.
- static const int kFramePaddingValue = kFramePaddingInitialSize + 1;
};
@@ -280,8 +244,8 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
}
void SetInitialProperties(Handle<String> name, int start_position,
- int end_position, int param_num, int literal_count,
- int parent_index, int function_literal_id);
+ int end_position, int param_num, int parent_index,
+ int function_literal_id);
void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
@@ -291,10 +255,6 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
Handle<SharedFunctionInfo> GetSharedFunctionInfo();
- int GetLiteralCount() {
- return this->GetSmiValueField(kLiteralNumOffset_);
- }
-
int GetParentIndex() {
return this->GetSmiValueField(kParentIndexOffset_);
}
@@ -313,9 +273,8 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kFunctionScopeInfoOffset_ = 4;
static const int kParentIndexOffset_ = 5;
static const int kSharedFunctionInfoOffset_ = 6;
- static const int kLiteralNumOffset_ = 7;
- static const int kFunctionLiteralIdOffset_ = 8;
- static const int kSize_ = 9;
+ static const int kFunctionLiteralIdOffset_ = 7;
+ static const int kSize_ = 8;
friend class JSArrayBasedStruct<FunctionInfoWrapper>;
};
diff --git a/deps/v8/src/debug/liveedit.js b/deps/v8/src/debug/liveedit.js
index 0076543f8b..8e206544eb 100644
--- a/deps/v8/src/debug/liveedit.js
+++ b/deps/v8/src/debug/liveedit.js
@@ -773,7 +773,7 @@
this.scope_info = raw_array[4];
this.outer_index = raw_array[5];
this.shared_function_info = raw_array[6];
- this.function_literal_id = raw_array[8];
+ this.function_literal_id = raw_array[7];
this.next_sibling_index = null;
this.raw_array = raw_array;
}
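
The index shift above mirrors the layout change in liveedit.h: dropping the
literal-count slot moves function_literal_id from slot 8 to slot 7 of the
FunctionInfo array. A sketch of the resulting layout as a hypothetical unpack
helper; the order of slots 0-3 is inferred from SetInitialProperties in
liveedit.cc:

    function unpackFunctionInfo(raw_array) {
      return {
        function_name: raw_array[0],
        start_position: raw_array[1],
        end_position: raw_array[2],
        param_num: raw_array[3],
        scope_info: raw_array[4],
        outer_index: raw_array[5],
        shared_function_info: raw_array[6],
        function_literal_id: raw_array[7]  // previously raw_array[8]
      };
    }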
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index c6daf2d226..5b809e6a40 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -69,16 +69,6 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Load padding words on stack.
- __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- __ Subu(sp, sp,
- Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
- for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
- __ sw(at, MemOperand(sp, kPointerSize * i));
- }
- __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
- __ push(at);
-
// Push arguments for DebugBreak call.
if (mode == SAVE_RESULT_REGISTER) {
// Break on return.
@@ -104,47 +94,47 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
}
}
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
// Leave the internal frame.
}
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ li(t9, Operand(after_break_target));
- __ lw(t9, MemOperand(t9));
- __ Jump(t9);
+ __ MaybeDropFrames();
+
+ // Return to caller.
+ __ Ret();
}
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+ }
+ __ MaybeDropFrames();
+
+ // Return to caller.
+ __ Ret();
+}
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- // We do not know our frame height, but set sp based on fp.
- __ lw(a1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+ // Frame is being dropped:
+ // - Drop to the target frame specified by a1.
+ // - Look up current function on the frame.
+ // - Leave the frame.
+ // - Restart the frame by calling the function.
+ __ mov(fp, a1);
+ __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// Pop return address and frame.
__ LeaveFrame(StackFrame::INTERNAL);
- ParameterCount dummy(0);
- __ CheckDebugHook(a1, no_reg, dummy, dummy);
-
- // Load context from the function.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Clear new.target as a safety measure.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
-
- // Get function code.
- __ lw(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
- __ Addu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a0,
+ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(a2, a0);
- // Re-run JSFunction, a1 is function, cp is context.
- __ Jump(t9);
+ ParameterCount dummy1(a2);
+ ParameterCount dummy2(a0);
+ __ InvokeFunction(a1, dummy1, dummy2, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
}
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index 0230f13145..b8dbbfb45e 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -65,22 +65,23 @@ bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
}
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+ }
+ __ MaybeDropFrames();
+
+ // Return to caller.
+ __ Ret();
+}
+
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
__ RecordComment("Debug break");
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Load padding words on stack.
- __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- __ Dsubu(sp, sp,
- Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
- for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
- __ sd(at, MemOperand(sp, kPointerSize * i));
- }
- __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
- __ push(at);
-
// Push arguments for DebugBreak call.
if (mode == SAVE_RESULT_REGISTER) {
// Break on return.
@@ -107,46 +108,36 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
}
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
// Leave the internal frame.
}
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ li(t9, Operand(after_break_target));
- __ ld(t9, MemOperand(t9));
- __ Jump(t9);
-}
+ __ MaybeDropFrames();
+ // Return to caller.
+ __ Ret();
+}
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- // We do not know our frame height, but set sp based on fp.
- __ ld(a1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+ // Frame is being dropped:
+ // - Drop to the target frame specified by a1.
+ // - Look up current function on the frame.
+ // - Leave the frame.
+ // - Restart the frame by calling the function.
+ __ mov(fp, a1);
+ __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// Pop return address and frame.
__ LeaveFrame(StackFrame::INTERNAL);
- ParameterCount dummy(0);
- __ CheckDebugHook(a1, no_reg, dummy, dummy);
-
- // Load context from the function.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Clear new.target as a safety measure.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
-
- // Get function code.
- __ ld(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
- __ Daddu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a0,
+ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(a2, a0);
- // Re-run JSFunction, a1 is function, cp is context.
- __ Jump(t9);
+ ParameterCount dummy1(a2);
+ ParameterCount dummy2(a0);
+ __ InvokeFunction(a1, dummy1, dummy2, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
}
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 2713be36b7..b534fecdc4 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -77,59 +77,15 @@ var MirrorType = {
GENERATOR_TYPE : 'generator',
}
-
-// Handle id counters.
-var next_handle_ = 0;
-var next_transient_handle_ = -1;
-
-// Mirror cache.
-var mirror_cache_ = [];
-var mirror_cache_enabled_ = true;
-
-
-function MirrorCacheIsEmpty() {
- return next_handle_ == 0 && mirror_cache_.length == 0;
-}
-
-
-function ToggleMirrorCache(value) {
- mirror_cache_enabled_ = value;
- ClearMirrorCache();
-}
-
-
-function ClearMirrorCache(value) {
- next_handle_ = 0;
- mirror_cache_ = [];
-}
-
-
/**
* Returns the mirror for a specified value or object.
*
* @param {value or Object} value the value or object to retrieve the mirror for
- * @param {boolean} transient indicate whether this object is transient and
- * should not be added to the mirror cache. The default is not transient.
 * @returns {Mirror} the mirror that reflects the passed value or object
*/
-function MakeMirror(value, opt_transient) {
+function MakeMirror(value) {
var mirror;
- // Look for non-transient mirrors in the mirror cache.
- if (!opt_transient && mirror_cache_enabled_) {
- for (var id in mirror_cache_) {
- mirror = mirror_cache_[id];
- if (mirror.value() === value) {
- return mirror;
- }
- // Special check for NaN as NaN == NaN is false.
- if (mirror.isNumber() && IsNaN(mirror.value()) &&
- typeof value == 'number' && IsNaN(value)) {
- return mirror;
- }
- }
- }
-
if (IS_UNDEFINED(value)) {
mirror = new UndefinedMirror();
} else if (IS_NULL(value)) {
@@ -165,30 +121,14 @@ function MakeMirror(value, opt_transient) {
} else if (IS_GENERATOR(value)) {
mirror = new GeneratorMirror(value);
} else {
- mirror = new ObjectMirror(value, MirrorType.OBJECT_TYPE, opt_transient);
+ mirror = new ObjectMirror(value, MirrorType.OBJECT_TYPE);
}
- if (mirror_cache_enabled_) mirror_cache_[mirror.handle()] = mirror;
return mirror;
}
/**
- * Returns the mirror for a specified mirror handle.
- *
- * @param {number} handle the handle to find the mirror for
- * @returns {Mirror or undefined} the mirror with the requested handle or
- * undefined if no mirror with the requested handle was found
- */
-function LookupMirror(handle) {
- if (!mirror_cache_enabled_) {
- throw %make_error(kDebugger, "Mirror cache is disabled");
- }
- return mirror_cache_[handle];
-}
-
-
-/**
* Returns the mirror for the undefined value.
*
* @returns {Mirror} the mirror reflects the undefined value
@@ -491,23 +431,6 @@ Mirror.prototype.isIterator = function() {
};
-/**
- * Allocate a handle id for this object.
- */
-Mirror.prototype.allocateHandle_ = function() {
- if (mirror_cache_enabled_) this.handle_ = next_handle_++;
-};
-
-
-/**
- * Allocate a transient handle id for this object. Transient handles are
- * negative.
- */
-Mirror.prototype.allocateTransientHandle_ = function() {
- this.handle_ = next_transient_handle_--;
-};
-
-
Mirror.prototype.toText = function() {
  // Simple toText, used when there is no specialization in a subclass.
return "#<" + this.constructor.name + ">";
@@ -518,28 +441,16 @@ Mirror.prototype.toText = function() {
* Base class for all value mirror objects.
* @param {string} type The type of the mirror
* @param {value} value The value reflected by this mirror
- * @param {boolean} transient indicate whether this object is transient with a
- * transient handle
* @constructor
* @extends Mirror
*/
-function ValueMirror(type, value, transient) {
+function ValueMirror(type, value) {
%_Call(Mirror, this, type);
this.value_ = value;
- if (!transient) {
- this.allocateHandle_();
- } else {
- this.allocateTransientHandle_();
- }
}
inherits(ValueMirror, Mirror);
-Mirror.prototype.handle = function() {
- return this.handle_;
-};
-
-
/**
* Check whether this is a primitive value.
* @return {boolean} True if the mirror reflects a primitive value
@@ -626,7 +537,7 @@ inherits(NumberMirror, ValueMirror);
NumberMirror.prototype.toText = function() {
- return %_NumberToString(this.value_);
+ return %NumberToString(this.value_);
};
@@ -684,14 +595,12 @@ SymbolMirror.prototype.toText = function() {
/**
* Mirror object for objects.
* @param {object} value The object reflected by this mirror
- * @param {boolean} transient indicate whether this object is transient with a
- * transient handle
* @constructor
* @extends ValueMirror
*/
-function ObjectMirror(value, type, transient) {
+function ObjectMirror(value, type) {
type = type || MirrorType.OBJECT_TYPE;
- %_Call(ValueMirror, this, type, value, transient);
+ %_Call(ValueMirror, this, type, value);
}
inherits(ObjectMirror, ValueMirror);
@@ -2010,11 +1919,12 @@ FrameMirror.prototype.allScopes = function(opt_ignore_nested_scopes) {
};
-FrameMirror.prototype.evaluate = function(source) {
+FrameMirror.prototype.evaluate = function(source, throw_on_side_effect = false) {
return MakeMirror(%DebugEvaluate(this.break_id_,
this.details_.frameId(),
this.details_.inlinedFrameIndex(),
- source));
+ source,
+ throw_on_side_effect));
};
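
The hunk above extends FrameMirror.prototype.evaluate with a
throw_on_side_effect flag defaulting to false, so existing callers keep the
old semantics. A hedged usage sketch, assuming the usual exec_state object
passed to debug event handlers:

    // Old behavior: evaluate in frame 0, side effects allowed.
    var result = exec_state.frame(0).evaluate("counter + 1");

    // New flag: ask %DebugEvaluate to throw if the expression would
    // mutate state.
    var checked = exec_state.frame(0).evaluate("counter++", true);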
@@ -2313,13 +2223,10 @@ ScopeMirror.prototype.scopeType = function() {
ScopeMirror.prototype.scopeObject = function() {
- // For local, closure and script scopes create a transient mirror
+ // For local, closure and script scopes create a mirror
// as these objects are created on the fly materializing the local
// or closure scopes and therefore will not preserve identity.
- var transient = this.scopeType() == ScopeType.Local ||
- this.scopeType() == ScopeType.Closure ||
- this.scopeType() == ScopeType.Script;
- return MakeMirror(this.details_.object(), transient);
+ return MakeMirror(this.details_.object());
};
@@ -2338,7 +2245,6 @@ function ScriptMirror(script) {
%_Call(Mirror, this, MirrorType.SCRIPT_TYPE);
this.script_ = script;
this.context_ = new ContextMirror(script.context_data);
- this.allocateHandle_();
}
inherits(ScriptMirror, Mirror);
@@ -2454,7 +2360,6 @@ ScriptMirror.prototype.toText = function() {
function ContextMirror(data) {
%_Call(Mirror, this, MirrorType.CONTEXT_TYPE);
this.data_ = data;
- this.allocateHandle_();
}
inherits(ContextMirror, Mirror);
@@ -2463,580 +2368,11 @@ ContextMirror.prototype.data = function() {
return this.data_;
};
-
-/**
- * Returns a mirror serializer
- *
- * @param {boolean} details Set to true to include details
- * @param {Object} options Options controlling the serialization
- * The following options can be set:
- * includeSource: include the full source of scripts
- * @returns {MirrorSerializer} mirror serializer
- */
-function MakeMirrorSerializer(details, options) {
- return new JSONProtocolSerializer(details, options);
-}
-
-
-/**
- * Object for serializing a mirror objects and its direct references.
- * @param {boolean} details Indicates whether to include details for the mirror
- * serialized
- * @constructor
- */
-function JSONProtocolSerializer(details, options) {
- this.details_ = details;
- this.options_ = options;
- this.mirrors_ = [ ];
-}
-
-
-/**
- * Returns a serialization of an object reference. The referenced objects are
- * added to the serialization state.
- *
- * @param {Mirror} mirror The mirror to serialize
- * @returns {String} JSON serialization
- */
-JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
- return this.serialize_(mirror, true, true);
-};
-
-
-/**
- * Returns a serialization of an object value. The referenced objects are
- * added to the serialization state.
- *
- * @param {Mirror} mirror The mirror to serialize
- * @returns {String} JSON serialization
- */
-JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
- var json = this.serialize_(mirror, false, true);
- return json;
-};
-
-
-/**
- * Returns a serialization of all the objects referenced.
- *
- * @returns {Array.<Object>} Array of the referenced objects converted to
- *     protocol objects.
- */
-JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
- // Collect the protocol representation of the referenced objects in an array.
- var content = [];
-
- // Get the number of referenced objects.
- var count = this.mirrors_.length;
-
- for (var i = 0; i < count; i++) {
- content.push(this.serialize_(this.mirrors_[i], false, false));
- }
-
- return content;
-};
-
-
-JSONProtocolSerializer.prototype.includeSource_ = function() {
- return this.options_ && this.options_.includeSource;
-};
-
-
-JSONProtocolSerializer.prototype.inlineRefs_ = function() {
- return this.options_ && this.options_.inlineRefs;
-};
-
-
-JSONProtocolSerializer.prototype.maxStringLength_ = function() {
- if (IS_UNDEFINED(this.options_) ||
- IS_UNDEFINED(this.options_.maxStringLength)) {
- return kMaxProtocolStringLength;
- }
- return this.options_.maxStringLength;
-};
-
-
-JSONProtocolSerializer.prototype.add_ = function(mirror) {
- // If this mirror is already in the list just return.
- for (var i = 0; i < this.mirrors_.length; i++) {
- if (this.mirrors_[i] === mirror) {
- return;
- }
- }
-
- // Add the mirror to the list of mirrors to be serialized.
- this.mirrors_.push(mirror);
-};
-
-
-/**
- * Formats mirror object to protocol reference object with some data that can
- * be used to display the value in debugger.
- * @param {Mirror} mirror Mirror to serialize.
- * @return {Object} Protocol reference object.
- */
-JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
- function(mirror) {
- var o = {};
- o.ref = mirror.handle();
- o.type = mirror.type();
- switch (mirror.type()) {
- case MirrorType.UNDEFINED_TYPE:
- case MirrorType.NULL_TYPE:
- case MirrorType.BOOLEAN_TYPE:
- case MirrorType.NUMBER_TYPE:
- o.value = mirror.value();
- break;
- case MirrorType.STRING_TYPE:
- o.value = mirror.getTruncatedValue(this.maxStringLength_());
- break;
- case MirrorType.SYMBOL_TYPE:
- o.description = mirror.description();
- break;
- case MirrorType.FUNCTION_TYPE:
- o.name = mirror.name();
- o.inferredName = mirror.inferredName();
- if (mirror.script()) {
- o.scriptId = mirror.script().id();
- }
- break;
- case MirrorType.ERROR_TYPE:
- case MirrorType.REGEXP_TYPE:
- o.value = mirror.toText();
- break;
- case MirrorType.OBJECT_TYPE:
- o.className = mirror.className();
- break;
- }
- return o;
-};
-
-
-JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
- details) {
- // If serializing a reference to a mirror just return the reference and add
- // the mirror to the referenced mirrors.
- if (reference &&
- (mirror.isValue() || mirror.isScript() || mirror.isContext())) {
- if (this.inlineRefs_() && mirror.isValue()) {
- return this.serializeReferenceWithDisplayData_(mirror);
- } else {
- this.add_(mirror);
- return {'ref' : mirror.handle()};
- }
- }
-
- // Collect the JSON property/value pairs.
- var content = {};
-
- // Add the mirror handle.
- if (mirror.isValue() || mirror.isScript() || mirror.isContext()) {
- content.handle = mirror.handle();
- }
-
- // Always add the type.
- content.type = mirror.type();
-
- switch (mirror.type()) {
- case MirrorType.UNDEFINED_TYPE:
- case MirrorType.NULL_TYPE:
- // Undefined and null are represented just by their type.
- break;
-
- case MirrorType.BOOLEAN_TYPE:
- // Boolean values are simply represented by their value.
- content.value = mirror.value();
- break;
-
- case MirrorType.NUMBER_TYPE:
- // Number values are simply represented by their value.
- content.value = NumberToJSON_(mirror.value());
- break;
-
- case MirrorType.STRING_TYPE:
- // String values might have their value cropped to keep down size.
- if (this.maxStringLength_() != -1 &&
- mirror.length() > this.maxStringLength_()) {
- var str = mirror.getTruncatedValue(this.maxStringLength_());
- content.value = str;
- content.fromIndex = 0;
- content.toIndex = this.maxStringLength_();
- } else {
- content.value = mirror.value();
- }
- content.length = mirror.length();
- break;
-
- case MirrorType.SYMBOL_TYPE:
- content.description = mirror.description();
- break;
-
- case MirrorType.OBJECT_TYPE:
- case MirrorType.FUNCTION_TYPE:
- case MirrorType.ERROR_TYPE:
- case MirrorType.REGEXP_TYPE:
- case MirrorType.PROMISE_TYPE:
- case MirrorType.GENERATOR_TYPE:
- // Add object representation.
- this.serializeObject_(mirror, content, details);
- break;
-
- case MirrorType.PROPERTY_TYPE:
- case MirrorType.INTERNAL_PROPERTY_TYPE:
- throw %make_error(kDebugger,
- 'PropertyMirror cannot be serialized independently');
- break;
-
- case MirrorType.FRAME_TYPE:
- // Add object representation.
- this.serializeFrame_(mirror, content);
- break;
-
- case MirrorType.SCOPE_TYPE:
- // Add object representation.
- this.serializeScope_(mirror, content);
- break;
-
- case MirrorType.SCRIPT_TYPE:
- // Script is represented by id, name and source attributes.
- if (mirror.name()) {
- content.name = mirror.name();
- }
- content.id = mirror.id();
- content.lineOffset = mirror.lineOffset();
- content.columnOffset = mirror.columnOffset();
- content.lineCount = mirror.lineCount();
- if (mirror.data()) {
- content.data = mirror.data();
- }
- if (this.includeSource_()) {
- content.source = mirror.source();
- } else {
- var sourceStart = mirror.source().substring(0, 80);
- content.sourceStart = sourceStart;
- }
- content.sourceLength = mirror.source().length;
- content.scriptType = mirror.scriptType();
- content.compilationType = mirror.compilationType();
- // For compilation type eval emit information on the script from which
- // eval was called if a script is present.
- if (mirror.compilationType() == 1 &&
- mirror.evalFromScript()) {
- content.evalFromScript =
- this.serializeReference(mirror.evalFromScript());
- var evalFromLocation = mirror.evalFromLocation();
- if (evalFromLocation) {
- content.evalFromLocation = { line: evalFromLocation.line,
- column: evalFromLocation.column };
- }
- if (mirror.evalFromFunctionName()) {
- content.evalFromFunctionName = mirror.evalFromFunctionName();
- }
- }
- if (mirror.context()) {
- content.context = this.serializeReference(mirror.context());
- }
- break;
-
- case MirrorType.CONTEXT_TYPE:
- content.data = mirror.data();
- break;
- }
-
- // Always add the text representation.
- content.text = mirror.toText();
-
- // Create and return the JSON string.
- return content;
-};
-
-
-/**
- * Serialize object information to the following JSON format.
- *
- * {"className":"<class name>",
- * "constructorFunction":{"ref":<number>},
- * "protoObject":{"ref":<number>},
- * "prototypeObject":{"ref":<number>},
- * "namedInterceptor":<boolean>,
- * "indexedInterceptor":<boolean>,
- * "properties":[<properties>],
- * "internalProperties":[<internal properties>]}
- */
-JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
- details) {
- // Add general object properties.
- content.className = mirror.className();
- content.constructorFunction =
- this.serializeReference(mirror.constructorFunction());
- content.protoObject = this.serializeReference(mirror.protoObject());
- content.prototypeObject = this.serializeReference(mirror.prototypeObject());
-
- // Add flags to indicate whether there are interceptors.
- if (mirror.hasNamedInterceptor()) {
- content.namedInterceptor = true;
- }
- if (mirror.hasIndexedInterceptor()) {
- content.indexedInterceptor = true;
- }
-
- if (mirror.isFunction()) {
- // Add function specific properties.
- content.name = mirror.name();
- if (!IS_UNDEFINED(mirror.inferredName())) {
- content.inferredName = mirror.inferredName();
- }
- content.resolved = mirror.resolved();
- if (mirror.resolved()) {
- content.source = mirror.source();
- }
- if (mirror.script()) {
- content.script = this.serializeReference(mirror.script());
- content.scriptId = mirror.script().id();
-
- serializeLocationFields(mirror.sourceLocation(), content);
- }
-
- content.scopes = [];
- for (var i = 0; i < mirror.scopeCount(); i++) {
- var scope = mirror.scope(i);
- content.scopes.push({
- type: scope.scopeType(),
- index: i
- });
- }
- }
-
- if (mirror.isGenerator()) {
- // Add generator specific properties.
-
- // Either 'running', 'closed', or 'suspended'.
- content.status = mirror.status();
-
- content.func = this.serializeReference(mirror.func())
- content.receiver = this.serializeReference(mirror.receiver())
-
-    // If the generator is suspended, add line/column properties to the content.
- serializeLocationFields(mirror.sourceLocation(), content);
-
- // TODO(wingo): Also serialize a reference to the context (scope chain).
- }
-
- if (mirror.isDate()) {
- // Add date specific properties.
- content.value = mirror.value();
- }
-
- if (mirror.isPromise()) {
- // Add promise specific properties.
- content.status = mirror.status();
- content.promiseValue = this.serializeReference(mirror.promiseValue());
- }
-
- // Add actual properties - named properties followed by indexed properties.
- var properties = mirror.propertyNames();
- for (var i = 0; i < properties.length; i++) {
- var propertyMirror = mirror.property(properties[i]);
- properties[i] = this.serializeProperty_(propertyMirror);
- if (details) {
- this.add_(propertyMirror.value());
- }
- }
- content.properties = properties;
-
- var internalProperties = mirror.internalProperties();
- if (internalProperties.length > 0) {
- var ip = [];
- for (var i = 0; i < internalProperties.length; i++) {
- ip.push(this.serializeInternalProperty_(internalProperties[i]));
- }
- content.internalProperties = ip;
- }
-};
-
-
-/**
- * Serialize location information to the following JSON format:
- *
- * "position":"<position>",
- * "line":"<line>",
- * "column":"<column>",
- *
- * @param {SourceLocation} location The location to serialize, may be undefined.
- */
-function serializeLocationFields (location, content) {
- if (!location) {
- return;
- }
- content.position = location.position;
- var line = location.line;
- if (!IS_UNDEFINED(line)) {
- content.line = line;
- }
- var column = location.column;
- if (!IS_UNDEFINED(column)) {
- content.column = column;
- }
-}
-
-
-/**
- * Serialize property information to the following JSON format for building the
- * array of properties.
- *
- * {"name":"<property name>",
- * "attributes":<number>,
- * "propertyType":<number>,
- * "ref":<number>}
- *
- * If the attribute for the property is PropertyAttribute.None it is not added.
- * Here are a couple of examples.
- *
- * {"name":"hello","propertyType":0,"ref":1}
- * {"name":"length","attributes":7,"propertyType":3,"ref":2}
- *
- * @param {PropertyMirror} propertyMirror The property to serialize.
- * @returns {Object} Protocol object representing the property.
- */
-JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
- var result = {};
-
- result.name = propertyMirror.name();
- var propertyValue = propertyMirror.value();
- if (this.inlineRefs_() && propertyValue.isValue()) {
- result.value = this.serializeReferenceWithDisplayData_(propertyValue);
- } else {
- if (propertyMirror.attributes() != PropertyAttribute.None) {
- result.attributes = propertyMirror.attributes();
- }
- result.propertyType = propertyMirror.propertyType();
- result.ref = propertyValue.handle();
- }
- return result;
-};
-
-
-/**
- * Serialize internal property information to the following JSON format for
- * building the array of properties.
- *
- * {"name":"<property name>",
- * "ref":<number>}
- *
- * {"name":"[[BoundThis]]","ref":117}
- *
- * @param {InternalPropertyMirror} propertyMirror The property to serialize.
- * @returns {Object} Protocol object representing the property.
- */
-JSONProtocolSerializer.prototype.serializeInternalProperty_ =
- function(propertyMirror) {
- var result = {};
-
- result.name = propertyMirror.name();
- var propertyValue = propertyMirror.value();
- if (this.inlineRefs_() && propertyValue.isValue()) {
- result.value = this.serializeReferenceWithDisplayData_(propertyValue);
- } else {
- result.ref = propertyValue.handle();
- }
- return result;
-};
-
-
-JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
- content.index = mirror.index();
- content.receiver = this.serializeReference(mirror.receiver());
- var func = mirror.func();
- content.func = this.serializeReference(func);
- var script = func.script();
- if (script) {
- content.script = this.serializeReference(script);
- }
- content.constructCall = mirror.isConstructCall();
- content.atReturn = mirror.isAtReturn();
- if (mirror.isAtReturn()) {
- content.returnValue = this.serializeReference(mirror.returnValue());
- }
- content.debuggerFrame = mirror.isDebuggerFrame();
- var x = new GlobalArray(mirror.argumentCount());
- for (var i = 0; i < mirror.argumentCount(); i++) {
- var arg = {};
- var argument_name = mirror.argumentName(i);
- if (argument_name) {
- arg.name = argument_name;
- }
- arg.value = this.serializeReference(mirror.argumentValue(i));
- x[i] = arg;
- }
- content.arguments = x;
- var x = new GlobalArray(mirror.localCount());
- for (var i = 0; i < mirror.localCount(); i++) {
- var local = {};
- local.name = mirror.localName(i);
- local.value = this.serializeReference(mirror.localValue(i));
- x[i] = local;
- }
- content.locals = x;
- serializeLocationFields(mirror.sourceLocation(), content);
- var source_line_text = mirror.sourceLineText();
- if (!IS_UNDEFINED(source_line_text)) {
- content.sourceLineText = source_line_text;
- }
-
- content.scopes = [];
- for (var i = 0; i < mirror.scopeCount(); i++) {
- var scope = mirror.scope(i);
- content.scopes.push({
- type: scope.scopeType(),
- index: i
- });
- }
-};
-
-
-JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
- content.index = mirror.scopeIndex();
- content.frameIndex = mirror.frameIndex();
- content.type = mirror.scopeType();
- content.object = this.inlineRefs_() ?
- this.serializeValue(mirror.scopeObject()) :
- this.serializeReference(mirror.scopeObject());
-};
-
-
-/**
- * Convert a number to a protocol value. For all finite numbers the number
- * itself is returned. For the non-finite numbers NaN, Infinity and
- * -Infinity the string representation "NaN", "Infinity" or "-Infinity"
- * (not including the quotes) is returned.
- *
- * @param {number} value The number value to convert to a protocol value.
- * @returns {number|string} Protocol value.
- */
-function NumberToJSON_(value) {
- if (IsNaN(value)) {
- return 'NaN';
- }
- if (!NUMBER_IS_FINITE(value)) {
- if (value > 0) {
- return 'Infinity';
- } else {
- return '-Infinity';
- }
- }
- return value;
-}
-
// ----------------------------------------------------------------------------
// Exports
utils.InstallFunctions(global, DONT_ENUM, [
"MakeMirror", MakeMirror,
- "MakeMirrorSerializer", MakeMirrorSerializer,
- "LookupMirror", LookupMirror,
- "ToggleMirrorCache", ToggleMirrorCache,
- "MirrorCacheIsEmpty", MirrorCacheIsEmpty,
]);
utils.InstallConstants(global, [
@@ -3071,13 +2407,4 @@ utils.InstallConstants(global, [
"FrameDetails", FrameDetails,
]);
-// Functions needed by the debugger runtime.
-utils.InstallFunctions(utils, DONT_ENUM, [
- "ClearMirrorCache", ClearMirrorCache
-]);
-
-// Export to debug.js
-utils.Export(function(to) {
- to.MirrorType = MirrorType;
-});
})
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index acca19a75e..42be185793 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -77,14 +77,6 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Load padding words on stack.
- __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ push(ip);
- }
- __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
- __ push(ip);
-
// Push arguments for DebugBreak call.
if (mode == SAVE_RESULT_REGISTER) {
// Break on return.
@@ -111,50 +103,47 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
}
}
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
// Leave the internal frame.
}
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ mov(ip, Operand(after_break_target));
- __ LoadP(ip, MemOperand(ip));
- __ JumpToJSEntry(ip);
-}
-
+ __ MaybeDropFrames();
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- // Load the function pointer off of our current stack frame.
- __ LoadP(r4, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
-
- // Pop return address and frame
- __ LeaveFrame(StackFrame::INTERNAL);
-
- ParameterCount dummy(0);
- __ CheckDebugHook(r4, no_reg, dummy, dummy);
+ // Return to caller.
+ __ Ret();
+}
- // Load context from the function.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+ }
+ __ MaybeDropFrames();
- // Clear new.target as a safety measure.
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ // Return to caller.
+ __ Ret();
+}
- // Get function code.
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+ // Frame is being dropped:
+ // - Drop to the target frame specified by r4.
+ // - Look up current function on the frame.
+ // - Leave the frame.
+ // - Restart the frame by calling the function.
- // Re-run JSFunction, r4 is function, cp is context.
- __ Jump(ip);
+ __ mr(fp, r4);
+ __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LeaveFrame(StackFrame::INTERNAL);
+ __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(
+ r3, FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mr(r5, r3);
+
+ ParameterCount dummy1(r5);
+ ParameterCount dummy2(r3);
+ __ InvokeFunction(r4, dummy1, dummy2, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
}
-
const bool LiveEdit::kFrameDropperSupported = true;
#undef __
diff --git a/deps/v8/src/debug/s390/debug-s390.cc b/deps/v8/src/debug/s390/debug-s390.cc
index a225c72f13..5ef6a60b82 100644
--- a/deps/v8/src/debug/s390/debug-s390.cc
+++ b/deps/v8/src/debug/s390/debug-s390.cc
@@ -82,14 +82,6 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Load padding words on stack.
- __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ push(ip);
- }
- __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
- __ push(ip);
-
// Push arguments for DebugBreak call.
if (mode == SAVE_RESULT_REGISTER) {
// Break on return.
@@ -116,46 +108,44 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
}
}
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
// Leave the internal frame.
}
+ __ MaybeDropFrames();
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ mov(ip, Operand(after_break_target));
- __ LoadP(ip, MemOperand(ip));
- __ JumpToJSEntry(ip);
+ // Return to caller.
+ __ Ret();
}
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- // Load the function pointer off of our current stack frame.
- __ LoadP(r3, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
-
- // Pop return address and frame
- __ LeaveFrame(StackFrame::INTERNAL);
-
- ParameterCount dummy(0);
- __ CheckDebugHook(r3, no_reg, dummy, dummy);
-
- // Load context from the function.
- __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+ }
+ __ MaybeDropFrames();
- // Clear new.target as a safety measure.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ // Return to caller.
+ __ Ret();
+}
- // Get function code.
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+ // Frame is being dropped:
+ // - Drop to the target frame specified by r3.
+ // - Look up current function on the frame.
+ // - Leave the frame.
+ // - Restart the frame by calling the function.
- // Re-run JSFunction, r3 is function, cp is context.
- __ Jump(ip);
+ __ LoadRR(fp, r3);
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LeaveFrame(StackFrame::INTERNAL);
+ __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(
+ r2, FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRR(r4, r2);
+
+ ParameterCount dummy1(r4);
+ ParameterCount dummy2(r2);
+ __ InvokeFunction(r3, dummy1, dummy2, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
}
const bool LiveEdit::kFrameDropperSupported = true;
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index afdc3303a2..63689dedab 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -9,6 +9,7 @@
#include "src/assembler.h"
#include "src/codegen.h"
#include "src/debug/liveedit.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -64,12 +65,6 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Load padding words on stack.
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ Push(Smi::FromInt(LiveEdit::kFramePaddingValue));
- }
- __ Push(Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
-
// Push arguments for DebugBreak call.
if (mode == SAVE_RESULT_REGISTER) {
// Break on return.
@@ -78,12 +73,8 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
// Non-return breaks.
__ Push(masm->isolate()->factory()->the_hole_value());
}
- __ Set(rax, 1);
- __ Move(rbx, ExternalReference(Runtime::FunctionForId(Runtime::kDebugBreak),
- masm->isolate()));
- CEntryStub ceb(masm->isolate(), 1);
- __ CallStub(&ceb);
+ __ CallRuntime(Runtime::kDebugBreak, 1, kDontSaveFPRegs);
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; ++i) {
@@ -95,55 +86,43 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
}
}
-
- // Read current padding counter and skip corresponding number of words.
- __ Pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
-
// Get rid of the internal frame.
}
-  // This call did not replace a call, so there will be an unwanted
- // return address left on the stack. Here we get rid of that.
- __ addp(rsp, Immediate(kPCOnStackSize));
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ Move(kScratchRegister, after_break_target);
- __ Jump(Operand(kScratchRegister, 0));
-}
-
+ __ MaybeDropFrames();
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- // We do not know our frame height, but set rsp based on rbp.
- __ leap(rsp, Operand(rbp, FrameDropperFrameConstants::kFunctionOffset));
- __ Pop(rdi); // Function.
- __ addp(rsp,
- Immediate(-FrameDropperFrameConstants::kCodeOffset)); // INTERNAL
- // frame marker
- // and code
- __ popq(rbp);
+ // Return to caller.
+ __ ret(0);
+}
- ParameterCount dummy(0);
- __ CheckDebugHook(rdi, no_reg, dummy, dummy);
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+ }
+ __ MaybeDropFrames();
- // Load context from the function.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // Return to caller.
+ __ ret(0);
+}
- // Clear new.target as a safety measure.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+ // Frame is being dropped:
+ // - Drop to the target frame specified by rbx.
+ // - Look up current function on the frame.
+ // - Leave the frame.
+ // - Restart the frame by calling the function.
+ __ movp(rbp, rbx);
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ leave();
- // Get function code.
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
- __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
+ __ LoadSharedFunctionInfoSpecialField(
+ rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);
- // Re-run JSFunction, rdi is function, rsi is context.
- __ jmp(rbx);
+ ParameterCount dummy(rbx);
+ __ InvokeFunction(rdi, no_reg, dummy, dummy, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
}
const bool LiveEdit::kFrameDropperSupported = true;
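Note that across the ppc, s390 and x64 ports above, the break stubs now share one shape: call into the runtime, then either return to the caller or drop to a frame the debugger marked for restart. A self-contained sketch of that control flow (every name below is illustrative, not V8's API):

#include <cstdint>

struct DebugState {
  uintptr_t restart_fp = 0;  // nonzero when LiveEdit wants a frame restarted
};

using FrameDropper = void (*)(uintptr_t target_fp);  // does not return

// What MaybeDropFrames emits in assembly, in C++ form: check the recorded
// frame pointer, then either tail-call the trampoline or simply return.
void AfterBreak(DebugState* debug, FrameDropper drop) {
  if (debug->restart_fp != 0) drop(debug->restart_fp);
  // Otherwise fall through: the __ Ret() in the stubs above.
}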
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 97f82cb3e1..8dfe0e181c 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -1396,8 +1396,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
// A marker value is used in place of the context.
output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ intptr_t context = StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
output_frame->SetFrameSlot(output_offset, context);
DebugPrintOutputSlot(context, frame_index, output_offset,
"context (adaptor sentinel)\n");
@@ -1453,8 +1452,8 @@ void Deoptimizer::DoComputeTailCallerFrame(TranslatedFrame* translated_frame,
Address adaptor_fp_address =
Memory::Address_at(fp_address + CommonFrameConstants::kCallerFPOffset);
- if (Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR) !=
- Memory::Object_at(adaptor_fp_address +
+ if (StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR) !=
+ Memory::intptr_at(adaptor_fp_address +
CommonFrameConstants::kContextOrFrameTypeOffset)) {
return;
}
@@ -1508,6 +1507,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
Builtins* builtins = isolate_->builtins();
Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+ BailoutId bailout_id = translated_frame->node_id();
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
@@ -1520,12 +1520,15 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
height_in_bytes += kPointerSize;
}
- // Skip function.
+ JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
value_iterator++;
input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
- " translating construct stub => height=%d\n", height_in_bytes);
+ " translating construct stub => bailout_id=%d (%s), height=%d\n",
+ bailout_id.ToInt(),
+ bailout_id == BailoutId::ConstructStubCreate() ? "create" : "invoke",
+ height_in_bytes);
}
unsigned fixed_frame_size = ConstructFrameConstants::kFixedFrameSize;
@@ -1589,7 +1592,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// A marker value is used to mark the frame.
output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
+ value = StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
"typed frame marker\n");
@@ -1609,13 +1612,21 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset,
- "allocated receiver\n");
+ if (bailout_id == BailoutId::ConstructStubCreate()) {
+ // The function was mentioned explicitly in the CONSTRUCT_STUB_FRAME.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
+ } else {
+ DCHECK(bailout_id == BailoutId::ConstructStubInvoke());
+ // The newly allocated object was passed as receiver in the artificial
+ // constructor stub environment created by HEnvironment::CopyForInlining().
+ output_offset -= kPointerSize;
+ value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(output_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "allocated receiver\n");
+ }
if (is_topmost) {
// Ensure the result is restored back when we return to the stub.
@@ -1632,10 +1643,17 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
CHECK_EQ(0u, output_offset);
- intptr_t pc = reinterpret_cast<intptr_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
+ // Compute this frame's PC.
+ DCHECK(bailout_id.IsValidForConstructStub());
+ Address start = construct_stub->instruction_start();
+ int pc_offset =
+ bailout_id == BailoutId::ConstructStubCreate()
+ ? isolate_->heap()->construct_stub_create_deopt_pc_offset()->value()
+ : isolate_->heap()->construct_stub_invoke_deopt_pc_offset()->value();
+ intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
+ output_frame->SetPc(pc_value);
+
+ // Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(construct_stub->constant_pool());
@@ -1761,7 +1779,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
// Set the frame type.
output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ value = StackFrame::TypeToMarker(StackFrame::INTERNAL);
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "frame type ");
if (trace_scope_ != nullptr) {
@@ -1820,6 +1838,8 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
intptr_t pc = reinterpret_cast<intptr_t>(
accessor_stub->instruction_start() + offset->value());
output_frame->SetPc(pc);
+
+ // Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
@@ -1956,8 +1976,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslatedFrame* translated_frame,
// The marker for the typed stack frame
output_frame_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
+ value = StackFrame::TypeToMarker(StackFrame::STUB_FAILURE_TRAMPOLINE);
output_frame->SetFrameSlot(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"function (stub failure sentinel)\n");
@@ -2319,9 +2338,10 @@ Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
return result;
}
-
-void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
+void Translation::BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
+ unsigned height) {
buffer_->Add(CONSTRUCT_STUB_FRAME);
+ buffer_->Add(bailout_id.ToInt());
buffer_->Add(literal_id);
buffer_->Add(height);
}
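The record written here gains one operand, matching CONSTRUCT_STUB_FRAME's move from the 2-operand to the 3-operand group in NumberOfOperandsFor below. Conceptually:

// Translation stream layout for a construct-stub frame after this change,
// in the order the Adds above write it and iterator->Next() reads it back:
//   CONSTRUCT_STUB_FRAME, bailout_id, literal_id, height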
@@ -2350,8 +2370,7 @@ void Translation::BeginTailCallerFrame(int literal_id) {
buffer_->Add(literal_id);
}
-void Translation::BeginJSFrame(BailoutId node_id,
- int literal_id,
+void Translation::BeginJSFrame(BailoutId node_id, int literal_id,
unsigned height) {
buffer_->Add(JS_FRAME);
buffer_->Add(node_id.ToInt());
@@ -2508,10 +2527,10 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
return 1;
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
- case CONSTRUCT_STUB_FRAME:
return 2;
case JS_FRAME:
case INTERPRETED_FRAME:
+ case CONSTRUCT_STUB_FRAME:
return 3;
}
FATAL("Unexpected translation type");
@@ -2793,7 +2812,7 @@ TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container,
// static
TranslatedValue TranslatedValue::NewFloat(TranslatedState* container,
- float value) {
+ Float32 value) {
TranslatedValue slot(container, kFloat);
slot.float_value_ = value;
return slot;
@@ -2801,7 +2820,7 @@ TranslatedValue TranslatedValue::NewFloat(TranslatedState* container,
// static
TranslatedValue TranslatedValue::NewDouble(TranslatedState* container,
- double value) {
+ Float64 value) {
TranslatedValue slot(container, kDouble);
slot.double_value_ = value;
return slot;
@@ -2870,12 +2889,12 @@ uint32_t TranslatedValue::uint32_value() const {
return uint32_value_;
}
-float TranslatedValue::float_value() const {
+Float32 TranslatedValue::float_value() const {
DCHECK_EQ(kFloat, kind());
return float_value_;
}
-double TranslatedValue::double_value() const {
+Float64 TranslatedValue::double_value() const {
DCHECK_EQ(kDouble, kind());
return double_value_;
}
@@ -2985,22 +3004,29 @@ void TranslatedValue::MaterializeSimple() {
}
switch (kind()) {
- case kInt32: {
+ case kInt32:
value_ = Handle<Object>(isolate()->factory()->NewNumber(int32_value()));
return;
- }
case kUInt32:
value_ = Handle<Object>(isolate()->factory()->NewNumber(uint32_value()));
return;
- case kFloat:
- value_ = Handle<Object>(isolate()->factory()->NewNumber(float_value()));
+ case kFloat: {
+ double scalar_value = float_value().get_scalar();
+ value_ = Handle<Object>(isolate()->factory()->NewNumber(scalar_value));
return;
+ }
- case kDouble:
- value_ = Handle<Object>(isolate()->factory()->NewNumber(double_value()));
+ case kDouble: {
+ if (double_value().is_hole_nan()) {
+ value_ = isolate()->factory()->hole_nan_value();
+ return;
+ }
+ double scalar_value = double_value().get_scalar();
+ value_ = Handle<Object>(isolate()->factory()->NewNumber(scalar_value));
return;
+ }
case kCapturedObject:
case kDuplicatedObject:
@@ -3048,6 +3074,13 @@ uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) {
#endif
}
+Float32 TranslatedState::GetFloatSlot(Address fp, int slot_offset) {
+ return Float32::FromBits(GetUInt32Slot(fp, slot_offset));
+}
+
+Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
+ return Float64::FromBits(Memory::uint64_at(fp + slot_offset));
+}
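Both helpers read the slot as an integer on purpose; a minimal standalone sketch of the same trick (names illustrative):

#include <cstdint>
#include <cstring>

// Lift a 64-bit slot off the stack without touching an FPU register, so NaN
// payloads (including the hole NaN) survive bit-for-bit.
uint64_t ReadRawDoubleSlot(const char* fp, int slot_offset) {
  uint64_t bits;
  std::memcpy(&bits, fp + slot_offset, sizeof bits);
  return bits;  // the real code wraps this via Float64::FromBits()
}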
void TranslatedValue::Handlify() {
if (kind() == kTagged) {
@@ -3096,9 +3129,11 @@ TranslatedFrame TranslatedFrame::TailCallerFrame(
}
TranslatedFrame TranslatedFrame::ConstructStubFrame(
- SharedFunctionInfo* shared_info, int height) {
- return TranslatedFrame(kConstructStub, shared_info->GetIsolate(), shared_info,
- height);
+ BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
+ TranslatedFrame frame(kConstructStub, shared_info->GetIsolate(), shared_info,
+ height);
+ frame.node_id_ = bailout_id;
+ return frame;
}
@@ -3216,15 +3251,18 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
}
case Translation::CONSTRUCT_STUB_FRAME: {
+ BailoutId bailout_id = BailoutId(iterator->Next());
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading construct stub frame %s", name.get());
- PrintF(trace_file, " => height=%d; inputs:\n", height);
+ PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
+ bailout_id.ToInt(), height);
}
- return TranslatedFrame::ConstructStubFrame(shared_info, height);
+ return TranslatedFrame::ConstructStubFrame(bailout_id, shared_info,
+ height);
}
case Translation::GETTER_STUB_FRAME: {
@@ -3403,9 +3441,9 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
case Translation::FLOAT_REGISTER: {
int input_reg = iterator->Next();
if (registers == nullptr) return TranslatedValue::NewInvalid(this);
- float value = registers->GetFloatRegister(input_reg);
+ Float32 value = registers->GetFloatRegister(input_reg);
if (trace_file != nullptr) {
- PrintF(trace_file, "%e ; %s (float)", value,
+ PrintF(trace_file, "%e ; %s (float)", value.get_scalar(),
RegisterConfiguration::Crankshaft()->GetFloatRegisterName(
input_reg));
}
@@ -3415,9 +3453,9 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
if (registers == nullptr) return TranslatedValue::NewInvalid(this);
- double value = registers->GetDoubleRegister(input_reg);
+ Float64 value = registers->GetDoubleRegister(input_reg);
if (trace_file != nullptr) {
- PrintF(trace_file, "%e ; %s (double)", value,
+ PrintF(trace_file, "%e ; %s (double)", value.get_scalar(),
RegisterConfiguration::Crankshaft()->GetDoubleRegisterName(
input_reg));
}
@@ -3473,9 +3511,9 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
case Translation::FLOAT_STACK_SLOT: {
int slot_offset =
OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
- float value = ReadFloatValue(fp + slot_offset);
+ Float32 value = GetFloatSlot(fp, slot_offset);
if (trace_file != nullptr) {
- PrintF(trace_file, "%e ; (float) [fp %c %d] ", value,
+ PrintF(trace_file, "%e ; (float) [fp %c %d] ", value.get_scalar(),
slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
}
return TranslatedValue::NewFloat(this, value);
@@ -3484,9 +3522,9 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
case Translation::DOUBLE_STACK_SLOT: {
int slot_offset =
OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
- double value = ReadDoubleValue(fp + slot_offset);
+ Float64 value = GetDoubleSlot(fp, slot_offset);
if (trace_file != nullptr) {
- PrintF(trace_file, "%e ; (double) [fp %c %d] ", value,
+ PrintF(trace_file, "%e ; (double) [fp %c %d] ", value.get_scalar(),
slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
}
return TranslatedValue::NewDouble(this, value);
@@ -3724,6 +3762,8 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
Handle<JSArrayIterator> object = Handle<JSArrayIterator>::cast(
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
slot->value_ = object;
+ // Initialize the index to zero to make the heap verifier happy.
+ object->set_index(Smi::FromInt(0));
Handle<Object> properties = materializer.FieldAt(value_index);
Handle<Object> elements = materializer.FieldAt(value_index);
Handle<Object> iterated_object = materializer.FieldAt(value_index);
@@ -3736,6 +3776,37 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
object->set_object_map(*iterated_object_map);
return object;
}
+ case JS_STRING_ITERATOR_TYPE: {
+ Handle<JSStringIterator> object = Handle<JSStringIterator>::cast(
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
+ // Initialize the index to zero to make the heap verifier happy.
+ object->set_index(0);
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> iterated_string = materializer.FieldAt(value_index);
+ Handle<Object> next_index = materializer.FieldAt(value_index);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ CHECK(iterated_string->IsString());
+ object->set_string(String::cast(*iterated_string));
+ CHECK(next_index->IsSmi());
+ object->set_index(Smi::cast(*next_index)->value());
+ return object;
+ }
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE: {
+ Handle<JSAsyncFromSyncIterator> object =
+ Handle<JSAsyncFromSyncIterator>::cast(
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> sync_iterator = materializer.FieldAt(value_index);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_sync_iterator(JSReceiver::cast(*sync_iterator));
+ return object;
+ }
case JS_ARRAY_TYPE: {
Handle<JSArray> object = Handle<JSArray>::cast(
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
@@ -3762,7 +3833,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
Handle<Object> prototype = materializer.FieldAt(value_index);
Handle<Object> shared = materializer.FieldAt(value_index);
Handle<Object> context = materializer.FieldAt(value_index);
- Handle<Object> literals = materializer.FieldAt(value_index);
+ Handle<Object> vector_cell = materializer.FieldAt(value_index);
Handle<Object> entry = materializer.FieldAt(value_index);
Handle<Object> next_link = materializer.FieldAt(value_index);
object->ReplaceCode(*isolate_->builtins()->CompileLazy());
@@ -3772,7 +3843,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
object->set_prototype_or_initial_map(*prototype);
object->set_shared(SharedFunctionInfo::cast(*shared));
object->set_context(Context::cast(*context));
- object->set_literals(LiteralsArray::cast(*literals));
+ object->set_feedback_vector_cell(Cell::cast(*vector_cell));
CHECK(entry->IsNumber()); // Entry to compile lazy stub.
CHECK(next_link->IsUndefined(isolate_));
return object;
@@ -3837,7 +3908,11 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
for (int i = 0; i < length; ++i) {
Handle<Object> value = materializer.FieldAt(value_index);
CHECK(value->IsNumber());
- double_array->set(i, value->Number());
+ if (value.is_identical_to(isolate_->factory()->hole_nan_value())) {
+ double_array->set_the_hole(isolate_, i);
+ } else {
+ double_array->set(i, value->Number());
+ }
}
}
return object;
@@ -3853,6 +3928,8 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case SHORT_EXTERNAL_STRING_TYPE:
case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case THIN_STRING_TYPE:
+ case THIN_ONE_BYTE_STRING_TYPE:
case INTERNALIZED_STRING_TYPE:
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_TYPE:
@@ -3863,7 +3940,6 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case SYMBOL_TYPE:
case ODDBALL_TYPE:
- case SIMD128_VALUE_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_API_OBJECT_TYPE:
@@ -3882,7 +3958,6 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
- case JS_STRING_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_CAPABILITY_TYPE:
@@ -3917,7 +3992,6 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case ALLOCATION_MEMENTO_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
- case BOX_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
case PROMISE_REACTION_JOB_INFO_TYPE:
case DEBUG_INFO_TYPE:
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 7d74af9958..5501ca6cf2 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -20,6 +20,37 @@ class DeoptimizedFrameInfo;
class TranslatedState;
class RegisterValues;
+// Safety wrapper for a 32-bit floating-point value to make sure we don't lose
+// the exact bit pattern during deoptimization when passing this value. Note
+// that there is intentionally no way to construct it from a {float} value.
+class Float32 {
+ public:
+ Float32() : bit_pattern_(0) {}
+ uint32_t get_bits() const { return bit_pattern_; }
+ float get_scalar() const { return bit_cast<float>(bit_pattern_); }
+ static Float32 FromBits(uint32_t bits) { return Float32(bits); }
+
+ private:
+ explicit Float32(uint32_t bit_pattern) : bit_pattern_(bit_pattern) {}
+ uint32_t bit_pattern_;
+};
+
+// Safety wrapper for a 64-bit floating-point value to make sure we don't lose
+// the exact bit pattern during deoptimization when passing this value. Note
+// that there is intentionally no way to construct it from a {double} value.
+class Float64 {
+ public:
+ Float64() : bit_pattern_(0) {}
+ uint64_t get_bits() const { return bit_pattern_; }
+ double get_scalar() const { return bit_cast<double>(bit_pattern_); }
+ bool is_hole_nan() const { return bit_pattern_ == kHoleNanInt64; }
+ static Float64 FromBits(uint64_t bits) { return Float64(bits); }
+
+ private:
+ explicit Float64(uint64_t bit_pattern) : bit_pattern_(bit_pattern) {}
+ uint64_t bit_pattern_;
+};
+
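A short usage sketch against the wrappers above (raw_bits would come from a register or stack slot; the methods are exactly the ones just defined):

// Sketch: materialize a deopt slot without losing the hole sentinel.
double MaterializeOrHole(uint64_t raw_bits, bool* is_hole) {
  Float64 v = Float64::FromBits(raw_bits);
  *is_hole = v.is_hole_nan();  // bit-pattern test; NaN == NaN is false, so a
                               // value-level compare could never detect this
  return *is_hole ? 0.0 : v.get_scalar();
}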
class TranslatedValue {
public:
// Allocation-less getter of the value.
@@ -64,8 +95,8 @@ class TranslatedValue {
static TranslatedValue NewDeferredObject(TranslatedState* container,
int length, int object_index);
static TranslatedValue NewDuplicateObject(TranslatedState* container, int id);
- static TranslatedValue NewFloat(TranslatedState* container, float value);
- static TranslatedValue NewDouble(TranslatedState* container, double value);
+ static TranslatedValue NewFloat(TranslatedState* container, Float32 value);
+ static TranslatedValue NewDouble(TranslatedState* container, Float64 value);
static TranslatedValue NewInt32(TranslatedState* container, int32_t value);
static TranslatedValue NewUInt32(TranslatedState* container, uint32_t value);
static TranslatedValue NewBool(TranslatedState* container, uint32_t value);
@@ -98,9 +129,9 @@ class TranslatedValue {
// kind is kInt32.
int32_t int32_value_;
// kind is kFloat
- float float_value_;
+ Float32 float_value_;
// kind is kDouble
- double double_value_;
+ Float64 double_value_;
// kind is kDuplicatedObject or kArgumentsObject or kCapturedObject.
MaterializedObjectInfo materialization_info_;
};
@@ -109,8 +140,8 @@ class TranslatedValue {
Object* raw_literal() const;
int32_t int32_value() const;
uint32_t uint32_value() const;
- float float_value() const;
- double double_value() const;
+ Float32 float_value() const;
+ Float64 double_value() const;
int object_length() const;
int object_index() const;
};
@@ -195,7 +226,8 @@ class TranslatedFrame {
static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo* shared_info,
int height);
static TranslatedFrame TailCallerFrame(SharedFunctionInfo* shared_info);
- static TranslatedFrame ConstructStubFrame(SharedFunctionInfo* shared_info,
+ static TranslatedFrame ConstructStubFrame(BailoutId bailout_id,
+ SharedFunctionInfo* shared_info,
int height);
static TranslatedFrame CompiledStubFrame(int height, Isolate* isolate) {
return TranslatedFrame(kCompiledStub, isolate, nullptr, height);
@@ -298,6 +330,8 @@ class TranslatedState {
bool GetAdaptedArguments(Handle<JSObject>* result, int frame_index);
static uint32_t GetUInt32Slot(Address fp, int slot_index);
+ static Float32 GetFloatSlot(Address fp, int slot_index);
+ static Float64 GetDoubleSlot(Address fp, int slot_index);
std::vector<TranslatedFrame> frames_;
Isolate* isolate_;
@@ -652,12 +686,12 @@ class RegisterValues {
return registers_[n];
}
- float GetFloatRegister(unsigned n) const {
+ Float32 GetFloatRegister(unsigned n) const {
DCHECK(n < arraysize(float_registers_));
return float_registers_[n];
}
- double GetDoubleRegister(unsigned n) const {
+ Float64 GetDoubleRegister(unsigned n) const {
DCHECK(n < arraysize(double_registers_));
return double_registers_[n];
}
@@ -667,19 +701,24 @@ class RegisterValues {
registers_[n] = value;
}
- void SetFloatRegister(unsigned n, float value) {
+ void SetFloatRegister(unsigned n, Float32 value) {
DCHECK(n < arraysize(float_registers_));
float_registers_[n] = value;
}
- void SetDoubleRegister(unsigned n, double value) {
+ void SetDoubleRegister(unsigned n, Float64 value) {
DCHECK(n < arraysize(double_registers_));
double_registers_[n] = value;
}
+  // Generated code writes directly into the arrays below; make sure their
+  // element sizes match what the machine instructions expect.
+ static_assert(sizeof(Float32) == kFloatSize, "size mismatch");
+ static_assert(sizeof(Float64) == kDoubleSize, "size mismatch");
+
intptr_t registers_[Register::kNumRegisters];
- float float_registers_[FloatRegister::kMaxNumRegisters];
- double double_registers_[DoubleRegister::kMaxNumRegisters];
+ Float32 float_registers_[FloatRegister::kMaxNumRegisters];
+ Float64 double_registers_[DoubleRegister::kMaxNumRegisters];
};
@@ -732,7 +771,7 @@ class FrameDescription {
return register_values_.GetRegister(n);
}
- double GetDoubleRegister(unsigned n) const {
+ Float64 GetDoubleRegister(unsigned n) const {
return register_values_.GetDoubleRegister(n);
}
@@ -740,7 +779,7 @@ class FrameDescription {
register_values_.SetRegister(n, value);
}
- void SetDoubleRegister(unsigned n, double value) {
+ void SetDoubleRegister(unsigned n, Float64 value) {
register_values_.SetDoubleRegister(n, value);
}
@@ -936,7 +975,8 @@ class Translation BASE_EMBEDDED {
void BeginCompiledStubFrame(int height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginTailCallerFrame(int literal_id);
- void BeginConstructStubFrame(int literal_id, unsigned height);
+ void BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
+ unsigned height);
void BeginGetterStubFrame(int literal_id);
void BeginSetterStubFrame(int literal_id);
void BeginArgumentsObject(int args_length);
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 7036e1b62d..59accc1c20 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "src/assembler-inl.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
@@ -13,6 +14,7 @@
#include "src/disasm.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/snapshot/serializer-common.h"
#include "src/string-stream.h"
@@ -201,11 +203,6 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
Code::Kind kind = code->kind();
if (code->is_inline_cache_stub()) {
- if (kind == Code::LOAD_GLOBAL_IC &&
- LoadGlobalICState::GetTypeofMode(code->extra_ic_state()) ==
- INSIDE_TYPEOF) {
- out.AddFormatted(" inside typeof,");
- }
out.AddFormatted(" %s", Code::Kind2String(kind));
if (!IC::ICUseVector(kind)) {
InlineCacheState ic_state = IC::StateFromCode(code);
diff --git a/deps/v8/src/eh-frame.h b/deps/v8/src/eh-frame.h
index 3da4612f2c..bd064eb9cd 100644
--- a/deps/v8/src/eh-frame.h
+++ b/deps/v8/src/eh-frame.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/globals.h"
#include "src/macro-assembler.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 121f6fc2d9..d5acb6670f 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -187,7 +187,7 @@ static void CopyDictionaryToObjectElements(
: SKIP_WRITE_BARRIER;
Isolate* isolate = from->GetIsolate();
for (int i = 0; i < copy_size; i++) {
- int entry = from->FindEntry(i + from_start);
+ int entry = from->FindEntry(isolate, i + from_start);
if (entry != SeededNumberDictionary::kNotFound) {
Object* value = from->ValueAt(entry);
DCHECK(!value->IsTheHole(isolate));
@@ -417,8 +417,9 @@ static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
}
+ Isolate* isolate = from->GetIsolate();
for (int i = 0; i < copy_size; i++) {
- int entry = from->FindEntry(i + from_start);
+ int entry = from->FindEntry(isolate, i + from_start);
if (entry != SeededNumberDictionary::kNotFound) {
to->set(i + to_start, from->ValueAt(entry)->Number());
} else {
@@ -1628,7 +1629,7 @@ class DictionaryElementsAccessor
// Iterate through entire range, as accessing elements out of order is
// observable
for (uint32_t k = start_from; k < length; ++k) {
- int entry = dictionary->FindEntry(k);
+ int entry = dictionary->FindEntry(isolate, k);
if (entry == SeededNumberDictionary::kNotFound) {
if (search_for_hole) return Just(true);
continue;
@@ -1694,7 +1695,7 @@ class DictionaryElementsAccessor
// Iterate through entire range, as accessing elements out of order is
// observable.
for (uint32_t k = start_from; k < length; ++k) {
- int entry = dictionary->FindEntry(k);
+ int entry = dictionary->FindEntry(isolate, k);
if (entry == SeededNumberDictionary::kNotFound) {
continue;
}
@@ -2637,10 +2638,10 @@ class FastDoubleElementsAccessor
FixedArrayBase* elements_base = receiver->elements();
Object* value = *search_value;
- if (start_from >= length) return Just<int64_t>(-1);
-
length = std::min(static_cast<uint32_t>(elements_base->length()), length);
+ if (start_from >= length) return Just<int64_t>(-1);
+
if (!value->IsNumber()) {
return Just<int64_t>(-1);
}
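The reorder above is a correctness fix: length must be clamped to the backing store before start_from is range-checked, or a start index past the real store slips through. The same logic in isolation (illustrative types):

#include <algorithm>
#include <cstdint>

// Returns -1 when the search cannot start inside the actual backing store.
int64_t ClampedStart(uint32_t backing_length, uint32_t length,
                     uint32_t start_from) {
  length = std::min(backing_length, length);  // clamp first...
  if (start_from >= length) return -1;        // ...then range-check
  return static_cast<int64_t>(start_from);
}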
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index e5d4ad9d49..ee6afb2aca 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -7,6 +7,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/debug/debug.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/runtime-profiler.h"
@@ -469,7 +470,7 @@ Object* StackGuard::HandleInterrupts() {
isolate_->heap()->HandleGCRequest();
}
- if (CheckDebugBreak() || CheckDebugCommand()) {
+ if (CheckDebugBreak()) {
isolate_->debug()->HandleDebugBreak();
}
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index ee5d3a6f73..d5f6371726 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -7,12 +7,15 @@
#include "src/allocation.h"
#include "src/base/atomicops.h"
-#include "src/handles.h"
+#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
+template <typename T>
+class Handle;
+
class Execution final : public AllStatic {
public:
// Whether to report pending messages, or keep them pending on the isolate.
@@ -61,7 +64,7 @@ class PostponeInterruptsScope;
// StackGuard contains the handling of the limits that are used to limit the
// number of nested invocations of JavaScript and the stack size used in each
// invocation.
-class StackGuard final {
+class V8_EXPORT_PRIVATE StackGuard final {
public:
// Pass the address beyond which the stack should not grow. The stack
// is assumed to grow downwards.
@@ -86,12 +89,11 @@ class StackGuard final {
#define INTERRUPT_LIST(V) \
V(DEBUGBREAK, DebugBreak, 0) \
- V(DEBUGCOMMAND, DebugCommand, 1) \
- V(TERMINATE_EXECUTION, TerminateExecution, 2) \
- V(GC_REQUEST, GC, 3) \
- V(INSTALL_CODE, InstallCode, 4) \
- V(API_INTERRUPT, ApiInterrupt, 5) \
- V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 6)
+ V(TERMINATE_EXECUTION, TerminateExecution, 1) \
+ V(GC_REQUEST, GC, 2) \
+ V(INSTALL_CODE, InstallCode, 3) \
+ V(API_INTERRUPT, ApiInterrupt, 4) \
+ V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5)
#define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \
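
The third INTERRUPT_LIST argument reads as a bit position, which is why dropping DEBUGCOMMAND forces the renumbering above. A self-contained model of the X-macro pattern (assumed semantics; the entry set is trimmed for illustration):

#include <cstdio>

// Each entry expands to both an enum flag and a Check##Name() accessor,
// so the ids must stay dense once an entry is removed.
#define INTERRUPT_LIST(V)                       \
  V(DEBUGBREAK, DebugBreak, 0)                  \
  V(TERMINATE_EXECUTION, TerminateExecution, 1) \
  V(GC_REQUEST, GC, 2)

class StackGuardModel {
 public:
  enum InterruptFlag {
#define V(NAME, Name, id) NAME = (1 << id),
    INTERRUPT_LIST(V)
#undef V
  };

#define V(NAME, Name, id) \
  bool Check##Name() const { return (interrupts_ & NAME) != 0; }
  INTERRUPT_LIST(V)
#undef V

  void Request(InterruptFlag flag) { interrupts_ |= flag; }

 private:
  int interrupts_ = 0;
};

int main() {
  StackGuardModel guard;
  guard.Request(StackGuardModel::GC_REQUEST);
  std::printf("gc=%d terminate=%d\n", guard.CheckGC(),
              guard.CheckTerminateExecution());
}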
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 57dc09ce3c..6c72a46e93 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -10,6 +10,7 @@
#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/ic/stub-cache.h"
+#include "src/objects-inl.h"
#if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID)
#define SYMBOLIZE_FUNCTION
@@ -253,12 +254,11 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"double_absolute_constant");
Add(ExternalReference::address_of_double_neg_constant().address(),
"double_negate_constant");
- Add(ExternalReference::promise_hook_address(isolate).address(),
- "Isolate::promise_hook_address()");
+ Add(ExternalReference::promise_hook_or_debug_is_active_address(isolate)
+ .address(),
+ "Isolate::promise_hook_or_debug_is_active_address()");
// Debug addresses
- Add(ExternalReference::debug_after_break_target_address(isolate).address(),
- "Debug::after_break_target_address()");
Add(ExternalReference::debug_is_active_address(isolate).address(),
"Debug::is_active_address()");
Add(ExternalReference::debug_hook_on_function_call_address(isolate).address(),
@@ -267,6 +267,8 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"Debug::step_in_enabled_address()");
Add(ExternalReference::debug_suspended_generator_address(isolate).address(),
"Debug::step_suspended_generator_address()");
+ Add(ExternalReference::debug_restart_fp_address(isolate).address(),
+ "Debug::restart_fp_address()");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
@@ -442,6 +444,7 @@ void ExternalReferenceTable::AddDeoptEntries(Isolate* isolate) {
void ExternalReferenceTable::AddApiReferences(Isolate* isolate) {
// Add external references provided by the embedder (a null-terminated
// array).
+ api_refs_start_ = size();
intptr_t* api_external_references = isolate->api_external_references();
if (api_external_references != nullptr) {
while (*api_external_references != 0) {
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
index e1b97f957c..40eccbedf0 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/external-reference-table.h
@@ -22,6 +22,7 @@ class ExternalReferenceTable {
uint32_t size() const { return static_cast<uint32_t>(refs_.length()); }
Address address(uint32_t i) { return refs_[i].address; }
const char* name(uint32_t i) { return refs_[i].name; }
+ bool is_api_reference(uint32_t i) { return i >= api_refs_start_; }
#ifdef DEBUG
void increment_count(uint32_t i) { refs_[i].count++; }
@@ -64,6 +65,7 @@ class ExternalReferenceTable {
void AddApiReferences(Isolate* isolate);
List<ExternalReferenceEntry> refs_;
+ uint32_t api_refs_start_;
DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
};
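
is_api_reference works as a watermark: AddApiReferences records the table size before appending embedder entries, so any index at or past it denotes an API reference. A minimal sketch of the pattern (std::vector stands in for V8's List<>):

#include <cstdint>
#include <string>
#include <vector>

class ReferenceTableModel {
 public:
  void Add(const std::string& name) { names_.push_back(name); }
  void BeginApiReferences() {
    api_refs_start_ = static_cast<uint32_t>(names_.size());
  }
  bool is_api_reference(uint32_t i) const { return i >= api_refs_start_; }

 private:
  std::vector<std::string> names_;
  uint32_t api_refs_start_ = 0;
};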
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index f029864de1..79147d622c 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -89,12 +89,6 @@ Handle<HeapObject> Factory::NewFillerObject(int size,
}
-Handle<Box> Factory::NewBox(Handle<Object> value) {
- Handle<Box> result = Handle<Box>::cast(NewStruct(BOX_TYPE));
- result->set_value(*value);
- return result;
-}
-
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<PrototypeInfo> result =
Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE));
@@ -185,6 +179,34 @@ Handle<FixedArray> Factory::NewUninitializedFixedArray(int size) {
FixedArray);
}
+Handle<BoilerplateDescription> Factory::NewBoilerplateDescription(
+ int boilerplate, int all_properties, int index_keys, bool has_seen_proto) {
+ DCHECK_GE(boilerplate, 0);
+ DCHECK_GE(all_properties, index_keys);
+ DCHECK_GE(index_keys, 0);
+
+ int backing_store_size =
+ all_properties - index_keys - (has_seen_proto ? 1 : 0);
+ DCHECK_GE(backing_store_size, 0);
+ bool has_different_size_backing_store = boilerplate != backing_store_size;
+
+ // Space for name and value for every boilerplate property.
+ int size = 2 * boilerplate;
+
+ if (has_different_size_backing_store) {
+ // An extra entry for the backing store size.
+ size++;
+ }
+
+ Handle<BoilerplateDescription> description =
+ Handle<BoilerplateDescription>::cast(NewFixedArray(size, TENURED));
+
+ if (has_different_size_backing_store) {
+ DCHECK((boilerplate != (all_properties - index_keys)) || has_seen_proto);
+ description->set_backing_store_size(isolate(), backing_store_size);
+ }
+ return description;
+}
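
A standalone restatement of the size arithmetic in NewBoilerplateDescription, under the semantics the DCHECKs suggest: two slots (name plus value) per boilerplate property, plus one extra entry when the backing store size differs. The counts in main() are hypothetical:

#include <cassert>

int BoilerplateDescriptionSize(int boilerplate, int all_properties,
                               int index_keys, bool has_seen_proto) {
  int backing_store_size =
      all_properties - index_keys - (has_seen_proto ? 1 : 0);
  int size = 2 * boilerplate;
  if (boilerplate != backing_store_size) size++;
  return size;
}

int main() {
  // 3 boilerplate properties, 4 total, 1 indexed key, __proto__ seen:
  // the backing store holds 2, differing from 3, so one extra entry.
  assert(BoilerplateDescriptionSize(3, 4, 1, true) == 7);
  // Backing store matches the boilerplate exactly: no extra entry.
  assert(BoilerplateDescriptionSize(2, 2, 0, false) == 4);
}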
Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int size,
PretenureFlag pretenure) {
@@ -278,6 +300,7 @@ Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
int length = string.length();
+ if (length == 0) return empty_string();
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
Handle<SeqOneByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -371,6 +394,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
int length,
PretenureFlag pretenure) {
+ if (length == 0) return empty_string();
if (String::IsOneByte(string, length)) {
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
Handle<SeqOneByteString> result;
@@ -455,39 +479,63 @@ Handle<String> Factory::NewInternalizedStringImpl(
String);
}
-MaybeHandle<Map> Factory::InternalizedStringMapForString(
- Handle<String> string) {
- // If the string is in new space it cannot be used as internalized.
- if (isolate()->heap()->InNewSpace(*string)) return MaybeHandle<Map>();
+namespace {
- // Find the corresponding internalized string map for strings.
+MaybeHandle<Map> GetInternalizedStringMap(Factory* f, Handle<String> string) {
switch (string->map()->instance_type()) {
case STRING_TYPE:
- return internalized_string_map();
+ return f->internalized_string_map();
case ONE_BYTE_STRING_TYPE:
- return one_byte_internalized_string_map();
+ return f->one_byte_internalized_string_map();
case EXTERNAL_STRING_TYPE:
- return external_internalized_string_map();
+ return f->external_internalized_string_map();
case EXTERNAL_ONE_BYTE_STRING_TYPE:
- return external_one_byte_internalized_string_map();
+ return f->external_one_byte_internalized_string_map();
case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return external_internalized_string_with_one_byte_data_map();
+ return f->external_internalized_string_with_one_byte_data_map();
case SHORT_EXTERNAL_STRING_TYPE:
- return short_external_internalized_string_map();
+ return f->short_external_internalized_string_map();
case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
- return short_external_one_byte_internalized_string_map();
+ return f->short_external_one_byte_internalized_string_map();
case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return short_external_internalized_string_with_one_byte_data_map();
+ return f->short_external_internalized_string_with_one_byte_data_map();
default: return MaybeHandle<Map>(); // No match found.
}
}
+} // namespace
+
+MaybeHandle<Map> Factory::InternalizedStringMapForString(
+ Handle<String> string) {
+ // If the string is in new space it cannot be used as internalized.
+ if (isolate()->heap()->InNewSpace(*string)) return MaybeHandle<Map>();
+
+ return GetInternalizedStringMap(this, string);
+}
+
+template <class StringClass>
+Handle<StringClass> Factory::InternalizeExternalString(Handle<String> string) {
+ Handle<StringClass> cast_string = Handle<StringClass>::cast(string);
+ Handle<Map> map = GetInternalizedStringMap(this, string).ToHandleChecked();
+ Handle<StringClass> external_string = New<StringClass>(map, OLD_SPACE);
+ external_string->set_length(cast_string->length());
+ external_string->set_hash_field(cast_string->hash_field());
+ external_string->set_resource(nullptr);
+ isolate()->heap()->RegisterExternalString(*external_string);
+ return external_string;
+}
+
+template Handle<ExternalOneByteString>
+ Factory::InternalizeExternalString<ExternalOneByteString>(Handle<String>);
+template Handle<ExternalTwoByteString>
+ Factory::InternalizeExternalString<ExternalTwoByteString>(Handle<String>);
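
InternalizeExternalString is defined in the .cc file, so the two explicit instantiations above are what other translation units link against. The same pattern, condensed into one illustrative file:

struct ExternalOneByte {};
struct ExternalTwoByte {};

class FactoryModel {
 public:
  template <class StringClass>
  StringClass* InternalizeExternal(StringClass* s);
};

template <class StringClass>
StringClass* FactoryModel::InternalizeExternal(StringClass* s) {
  return s;  // the real code copies length/hash and registers the string
}

// Explicit instantiations, as in the hunk above:
template ExternalOneByte* FactoryModel::InternalizeExternal<ExternalOneByte>(
    ExternalOneByte*);
template ExternalTwoByte* FactoryModel::InternalizeExternal<ExternalTwoByte>(
    ExternalTwoByte*);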
MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
int length, PretenureFlag pretenure) {
if (length > String::kMaxLength || length < 0) {
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
}
+ DCHECK(length > 0); // Use Factory::empty_string() instead.
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawOneByteString(length, pretenure),
@@ -500,6 +548,7 @@ MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
if (length > String::kMaxLength || length < 0) {
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
}
+ DCHECK(length > 0); // Use Factory::empty_string() instead.
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
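
The length == 0 short-circuits added earlier in this file pair with these DCHECK(length > 0) additions: empty strings are canonicalized at the public entry points, so the raw allocators never see zero. A sketch of that guard-at-the-boundary pattern (plain C++):

#include <cassert>
#include <string>

const std::string& EmptyString() {
  static const std::string kEmpty;  // single canonical empty string
  return kEmpty;
}

std::string NewRawString(int length) {
  assert(length > 0);  // use EmptyString() instead
  return std::string(static_cast<size_t>(length), '\0');
}

std::string NewString(int length) {
  if (length == 0) return EmptyString();
  return NewRawString(length);
}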
@@ -588,6 +637,12 @@ Handle<String> ConcatStringContent(Handle<StringType> result,
MaybeHandle<String> Factory::NewConsString(Handle<String> left,
Handle<String> right) {
+ if (left->IsThinString()) {
+ left = handle(Handle<ThinString>::cast(left)->actual(), isolate());
+ }
+ if (right->IsThinString()) {
+ right = handle(Handle<ThinString>::cast(right)->actual(), isolate());
+ }
int left_length = left->length();
if (left_length == 0) return right;
int right_length = right->length();
@@ -734,6 +789,10 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
str = Handle<String>(slice->parent(), isolate());
offset += slice->offset();
}
+ if (str->IsThinString()) {
+ Handle<ThinString> thin = Handle<ThinString>::cast(str);
+ str = handle(thin->actual(), isolate());
+ }
DCHECK(str->IsSeqString() || str->IsExternalString());
Handle<Map> map = str->IsOneByteRepresentation()
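
A ThinString (assumed here to be a transparent forwarding wrapper produced by in-place internalization) must be peeled before cons or substring creation, as the hunks above do. A toy model of the unwrap:

#include <string>

struct Str {
  std::string payload;
  Str* actual = nullptr;  // non-null marks this string as "thin"
  bool IsThin() const { return actual != nullptr; }
};

const std::string& Payload(const Str* s) {
  if (s->IsThin()) s = s->actual;  // one level of forwarding, as above
  return s->payload;
}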
@@ -755,6 +814,7 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
if (length > static_cast<size_t>(String::kMaxLength)) {
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
}
+ if (length == 0) return empty_string();
Handle<Map> map;
if (resource->IsCompressible()) {
@@ -779,6 +839,7 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
if (length > static_cast<size_t>(String::kMaxLength)) {
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
}
+ if (length == 0) return empty_string();
// For small strings we check whether the resource contains only
// one byte characters. If yes, we use a different string map.
@@ -1098,7 +1159,6 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
FixedTypedArrayBase);
}
-
Handle<Cell> Factory::NewCell(Handle<Object> value) {
AllowDeferredHandleDereference convert_to_cell;
CALL_HEAP_FUNCTION(
@@ -1107,6 +1167,23 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
Cell);
}
+Handle<Cell> Factory::NewNoClosuresCell(Handle<Object> value) {
+ Handle<Cell> cell = NewCell(value);
+ cell->set_map_no_write_barrier(*no_closures_cell_map());
+ return cell;
+}
+
+Handle<Cell> Factory::NewOneClosureCell(Handle<Object> value) {
+ Handle<Cell> cell = NewCell(value);
+ cell->set_map_no_write_barrier(*one_closure_cell_map());
+ return cell;
+}
+
+Handle<Cell> Factory::NewManyClosuresCell(Handle<Object> value) {
+ Handle<Cell> cell = NewCell(value);
+ cell->set_map_no_write_barrier(*many_closures_cell_map());
+ return cell;
+}
Handle<PropertyCell> Factory::NewPropertyCell() {
CALL_HEAP_FUNCTION(
@@ -1250,27 +1327,13 @@ Handle<Object> Factory::NewNumberFromUint(uint32_t value,
return NewHeapNumber(FastUI2D(value), IMMUTABLE, pretenure);
}
-
-Handle<HeapNumber> Factory::NewHeapNumber(double value,
- MutableMode mode,
+Handle<HeapNumber> Factory::NewHeapNumber(MutableMode mode,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateHeapNumber(value, mode, pretenure),
- HeapNumber);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateHeapNumber(mode, pretenure),
+ HeapNumber);
}
-
-#define SIMD128_NEW_DEF(TYPE, Type, type, lane_count, lane_type) \
- Handle<Type> Factory::New##Type(lane_type lanes[lane_count], \
- PretenureFlag pretenure) { \
- CALL_HEAP_FUNCTION( \
- isolate(), isolate()->heap()->Allocate##Type(lanes, pretenure), Type); \
- }
-SIMD128_TYPES(SIMD128_NEW_DEF)
-#undef SIMD128_NEW_DEF
-
-
Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
MessageTemplate::Template template_index,
Handle<Object> arg0, Handle<Object> arg1,
@@ -1360,7 +1423,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
function->set_code(info->code());
function->set_context(*context_or_undefined);
function->set_prototype_or_initial_map(*the_hole_value());
- function->set_literals(LiteralsArray::cast(*empty_literals_array()));
+ function->set_feedback_vector_cell(*undefined_cell());
function->set_next_function_link(*undefined_value(), SKIP_WRITE_BARRIER);
isolate()->heap()->InitializeJSObjectBody(*function, *map, JSFunction::kSize);
return function;
@@ -1384,6 +1447,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
map.is_identical_to(isolate()->strict_function_without_prototype_map()) ||
// TODO(titzer): wasm_function_map() could be undefined here. ugly.
(*map == context->get(Context::WASM_FUNCTION_MAP_INDEX)) ||
+ (*map == context->get(Context::NATIVE_FUNCTION_MAP_INDEX)) ||
map.is_identical_to(isolate()->proxy_function_map()));
return NewFunction(map, info, context);
}
@@ -1492,12 +1556,12 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
- Handle<LiteralsArray> literals, PretenureFlag pretenure) {
+ Handle<Cell> vector, PretenureFlag pretenure) {
int map_index =
Context::FunctionMapIndex(info->language_mode(), info->kind());
Handle<Map> initial_map(Map::cast(context->native_context()->get(map_index)));
- return NewFunctionFromSharedFunctionInfo(initial_map, info, context, literals,
+ return NewFunctionFromSharedFunctionInfo(initial_map, info, context, vector,
pretenure);
}
@@ -1522,13 +1586,22 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
- Handle<Object> context_or_undefined, Handle<LiteralsArray> literals,
+ Handle<Object> context_or_undefined, Handle<Cell> vector,
PretenureFlag pretenure) {
DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
Handle<JSFunction> result =
NewFunction(initial_map, info, context_or_undefined, pretenure);
- result->set_literals(*literals);
+ // Bump the closure count that is encoded in the vector cell's map.
+ if (vector->map() == *no_closures_cell_map()) {
+ vector->set_map(*one_closure_cell_map());
+ } else if (vector->map() == *one_closure_cell_map()) {
+ vector->set_map(*many_closures_cell_map());
+ } else {
+ DCHECK_EQ(vector->map(), *many_closures_cell_map());
+ }
+
+ result->set_feedback_vector_cell(*vector);
if (info->ic_age() != isolate()->heap()->global_ic_age()) {
info->ResetForNewContext(isolate()->heap()->global_ic_age());
}
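
The vector cell's map acts as a saturating three-state closure counter: no closures, one, many. A standalone model of the bump performed above (names hypothetical):

#include <cassert>

enum class ClosuresCellMap { kNone, kOne, kMany };

void BumpClosureCount(ClosuresCellMap* map) {
  switch (*map) {
    case ClosuresCellMap::kNone: *map = ClosuresCellMap::kOne; break;
    case ClosuresCellMap::kOne:  *map = ClosuresCellMap::kMany; break;
    case ClosuresCellMap::kMany: break;  // saturates at "many"
  }
}

int main() {
  ClosuresCellMap m = ClosuresCellMap::kNone;
  BumpClosureCount(&m);  // first closure
  BumpClosureCount(&m);  // second closure
  BumpClosureCount(&m);  // stays at kMany
  assert(m == ClosuresCellMap::kMany);
}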
@@ -1617,7 +1690,6 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
code->set_prologue_offset(prologue_offset);
code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
code->set_builtin_index(-1);
- code->set_protected_instructions(*empty_fixed_array());
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
@@ -1668,9 +1740,9 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
isolate()->heap()->AllocateJSObject(*constructor, pretenure), JSObject);
}
-
-Handle<JSObject> Factory::NewJSObjectWithNullProto() {
- Handle<JSObject> result = NewJSObject(isolate()->object_function());
+Handle<JSObject> Factory::NewJSObjectWithNullProto(PretenureFlag pretenure) {
+ Handle<JSObject> result =
+ NewJSObject(isolate()->object_function(), pretenure);
Handle<Map> new_map =
Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
Map::SetPrototype(new_map, null_value());
@@ -1903,6 +1975,16 @@ Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
return js_iter_result;
}
+Handle<JSAsyncFromSyncIterator> Factory::NewJSAsyncFromSyncIterator(
+ Handle<JSReceiver> sync_iterator) {
+ Handle<Map> map(isolate()->native_context()->async_from_sync_iterator_map());
+ Handle<JSAsyncFromSyncIterator> iterator =
+ Handle<JSAsyncFromSyncIterator>::cast(NewJSObjectFromMap(map));
+
+ iterator->set_sync_iterator(*sync_iterator);
+ return iterator;
+}
+
Handle<JSMap> Factory::NewJSMap() {
Handle<Map> map(isolate()->native_context()->js_map_map());
Handle<JSMap> js_map = Handle<JSMap>::cast(NewJSObjectFromMap(map));
@@ -2274,15 +2356,14 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- Handle<String> name, int number_of_literals, FunctionKind kind,
- Handle<Code> code, Handle<ScopeInfo> scope_info) {
+ Handle<String> name, FunctionKind kind, Handle<Code> code,
+ Handle<ScopeInfo> scope_info) {
DCHECK(IsValidFunctionKind(kind));
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
name, code, IsConstructable(kind, scope_info->language_mode()));
shared->set_scope_info(*scope_info);
shared->set_outer_scope_info(*the_hole_value());
shared->set_kind(kind);
- shared->set_num_literals(number_of_literals);
if (IsGeneratorFunction(kind)) {
shared->set_instance_class_name(isolate()->heap()->Generator_string());
}
@@ -2293,9 +2374,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
FunctionLiteral* literal, Handle<Script> script) {
Handle<Code> code = isolate()->builtins()->CompileLazy();
Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
- Handle<SharedFunctionInfo> result = NewSharedFunctionInfo(
- literal->name(), literal->materialized_literal_count(), literal->kind(),
- code, scope_info);
+ Handle<SharedFunctionInfo> result =
+ NewSharedFunctionInfo(literal->name(), literal->kind(), code, scope_info);
SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
SharedFunctionInfo::SetScript(result, script);
return result;
@@ -2347,7 +2427,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->SetConstructStub(*construct_stub);
share->set_instance_class_name(*Object_string());
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
- share->set_debug_info(DebugInfo::uninitialized(), SKIP_WRITE_BARRIER);
+ share->set_debug_info(Smi::kZero, SKIP_WRITE_BARRIER);
share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
StaticFeedbackVectorSpec empty_spec;
Handle<FeedbackMetadata> feedback_metadata =
@@ -2365,7 +2445,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_length(0);
share->set_internal_formal_parameter_count(0);
share->set_expected_nof_properties(0);
- share->set_num_literals(0);
share->set_start_position_and_type(0);
share->set_end_position(0);
share->set_function_token_position(0);
@@ -2451,6 +2530,7 @@ Handle<String> Factory::NumberToString(Handle<Object> number,
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
+ DCHECK(!shared->HasDebugInfo());
// Allocate initial fixed array for active break points before allocating the
// debug info object to avoid allocation while setting up the debug info
// object.
@@ -2470,6 +2550,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<DebugInfo> debug_info =
Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
debug_info->set_shared(*shared);
+ debug_info->set_debugger_hints(shared->debugger_hints());
debug_info->set_debug_bytecode_array(*maybe_debug_bytecode_array);
debug_info->set_break_points(*break_points);
@@ -2479,6 +2560,13 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
return debug_info;
}
+Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
+ Handle<BreakPointInfo> new_break_point_info =
+ Handle<BreakPointInfo>::cast(NewStruct(BREAK_POINT_INFO_TYPE));
+ new_break_point_info->set_source_position(source_position);
+ new_break_point_info->set_break_point_objects(*undefined_value());
+ return new_break_point_info;
+}
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
int length) {
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 6a95363801..50d0137fa0 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -9,10 +9,14 @@
#include "src/globals.h"
#include "src/isolate.h"
#include "src/messages.h"
+#include "src/objects/scope-info.h"
namespace v8 {
namespace internal {
+class BoilerplateDescription;
+class ConstantElementsPair;
+
enum FunctionMode {
// With prototype.
FUNCTION_WITH_WRITEABLE_PROTOTYPE,
@@ -48,6 +52,13 @@ class V8_EXPORT_PRIVATE Factory final {
// Allocates an uninitialized fixed array. It must be filled by the caller.
Handle<FixedArray> NewUninitializedFixedArray(int size);
+ // Allocates a fixed array for name-value pairs of boilerplate properties and
+ // calculates the number of properties we need to store in the backing store.
+ Handle<BoilerplateDescription> NewBoilerplateDescription(int boilerplate,
+ int all_properties,
+ int index_keys,
+ bool has_seen_proto);
+
// Allocate a new uninitialized fixed double array.
// The function returns a pre-allocated empty fixed array for capacity = 0,
// so the return type must be the general fixed array class.
@@ -66,9 +77,6 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
- // Create a new boxed value.
- Handle<Box> NewBox(Handle<Object> value);
-
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
@@ -221,6 +229,11 @@ class V8_EXPORT_PRIVATE Factory final {
MUST_USE_RESULT MaybeHandle<Map> InternalizedStringMapForString(
Handle<String> string);
+ // Creates an internalized copy of an external string. |string| must be
+ // of type StringClass.
+ template <class StringClass>
+ Handle<StringClass> InternalizeExternalString(Handle<String> string);
+
// Allocates and partially initializes a one-byte or two-byte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
@@ -326,6 +339,8 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Script> NewScript(Handle<String> source);
+ Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
+
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
PretenureFlag pretenure = NOT_TENURED);
@@ -355,6 +370,10 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<WeakCell> NewWeakCell(Handle<HeapObject> value);
+ Handle<Cell> NewNoClosuresCell(Handle<Object> value);
+ Handle<Cell> NewOneClosureCell(Handle<Object> value);
+ Handle<Cell> NewManyClosuresCell(Handle<Object> value);
+
Handle<TransitionArray> NewTransitionArray(int capacity);
// Allocate a tenured AllocationSite. Its payload is null.
@@ -424,21 +443,28 @@ class V8_EXPORT_PRIVATE Factory final {
}
return NewNumber(static_cast<double>(value), pretenure);
}
- Handle<HeapNumber> NewHeapNumber(double value,
- MutableMode mode = IMMUTABLE,
- PretenureFlag pretenure = NOT_TENURED);
-
+ Handle<HeapNumber> NewHeapNumber(double value, MutableMode mode = IMMUTABLE,
+ PretenureFlag pretenure = NOT_TENURED) {
+ Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
+ heap_number->set_value(value);
+ return heap_number;
+ }
+ Handle<HeapNumber> NewHeapNumberFromBits(
+ uint64_t bits, MutableMode mode = IMMUTABLE,
+ PretenureFlag pretenure = NOT_TENURED) {
+ Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
+ heap_number->set_value_as_bits(bits);
+ return heap_number;
+ }
+ // Creates a mutable heap number object with its value field set to hole NaN.
Handle<HeapNumber> NewMutableHeapNumber(
PretenureFlag pretenure = NOT_TENURED) {
- double hole_nan = bit_cast<double>(kHoleNanInt64);
- return NewHeapNumber(hole_nan, MUTABLE, pretenure);
+ return NewHeapNumberFromBits(kHoleNanInt64, MUTABLE, pretenure);
}
-#define SIMD128_NEW_DECL(TYPE, Type, type, lane_count, lane_type) \
- Handle<Type> New##Type(lane_type lanes[lane_count], \
- PretenureFlag pretenure = NOT_TENURED);
- SIMD128_TYPES(SIMD128_NEW_DECL)
-#undef SIMD128_NEW_DECL
+ // Creates a heap number object whose value field is not yet set.
+ Handle<HeapNumber> NewHeapNumber(MutableMode mode,
+ PretenureFlag pretenure = NOT_TENURED);
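
A plausible reason for the FromBits variant, inferred from this hunk: the hole NaN must land in the value field bit-exactly, and a round-trip through a double risks canonicalizing the NaN payload. Illustrative sketch (the constant's value here is hypothetical):

#include <cstdint>
#include <cstring>

const uint64_t kHoleNanBits = 0xFFF7FFFFFFFFFFFFull;  // hypothetical pattern

void SetValueAsBits(double* slot, uint64_t bits) {
  std::memcpy(slot, &bits, sizeof(bits));  // bit-exact, no FP round-trip
}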
Handle<JSWeakMap> NewJSWeakMap();
@@ -449,7 +475,8 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
// JSObject without a prototype.
- Handle<JSObject> NewJSObjectWithNullProto();
+ Handle<JSObject> NewJSObjectWithNullProto(
+ PretenureFlag pretenure = NOT_TENURED);
// Global objects are pretenured and initialized based on a constructor.
Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
@@ -531,6 +558,8 @@ class V8_EXPORT_PRIVATE Factory final {
size_t byte_offset, size_t byte_length);
Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
+ Handle<JSAsyncFromSyncIterator> NewJSAsyncFromSyncIterator(
+ Handle<JSReceiver> sync_iterator);
Handle<JSMap> NewJSMap();
Handle<JSSet> NewJSSet();
@@ -571,12 +600,12 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
- Handle<Object> context_or_undefined, Handle<LiteralsArray> literals,
+ Handle<Object> context_or_undefined, Handle<Cell> vector,
PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info, Handle<Context> context,
- Handle<LiteralsArray> literals, PretenureFlag pretenure = TENURED);
+ Handle<Cell> vector, PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
@@ -706,8 +735,8 @@ class V8_EXPORT_PRIVATE Factory final {
// Allocates a new SharedFunctionInfo object.
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
- Handle<String> name, int number_of_literals, FunctionKind kind,
- Handle<Code> code, Handle<ScopeInfo> scope_info);
+ Handle<String> name, FunctionKind kind, Handle<Code> code,
+ Handle<ScopeInfo> scope_info);
Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
MaybeHandle<Code> code,
bool is_constructor);
diff --git a/deps/v8/src/fast-accessor-assembler.cc b/deps/v8/src/fast-accessor-assembler.cc
index e09db74666..6e7b49ea0b 100644
--- a/deps/v8/src/fast-accessor-assembler.cc
+++ b/deps/v8/src/fast-accessor-assembler.cc
@@ -8,6 +8,7 @@
#include "src/code-stub-assembler.h"
#include "src/code-stubs.h" // For CallApiCallbackStub.
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/objects.h" // For FAA::LoadInternalField impl.
namespace v8 {
@@ -184,12 +185,7 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
CallInterfaceDescriptor descriptor = stub.GetCallInterfaceDescriptor();
DCHECK_EQ(4, descriptor.GetParameterCount());
DCHECK_EQ(0, descriptor.GetStackParameterCount());
- // TODO(vogelheim): There is currently no clean way to retrieve the context
- // parameter for a stub and the implementation details are hidden in
- // compiler/*. The context_paramter is computed as:
- // Linkage::GetJSCallContextParamIndex(descriptor->JSParameterCount())
- const int kContextParameter = 3;
- Node* context = assembler_->Parameter(kContextParameter);
+ Node* context = assembler_->GetJSContextParameter();
Node* target = assembler_->HeapConstant(stub.GetCode());
Node* call = assembler_->CallStub(
@@ -218,7 +214,7 @@ void FastAccessorAssembler::CheckIsJSObjectOrJump(ValueId value_id,
&is_jsobject);
// JSApiObject?.
- assembler_->GotoUnless(
+ assembler_->GotoIfNot(
assembler_->Word32Equal(instance_type, assembler_->Int32Constant(
Internals::kJSApiObjectType)),
FromId(label_id));
diff --git a/deps/v8/src/fast-accessor-assembler.h b/deps/v8/src/fast-accessor-assembler.h
index c1380c9025..f51d5a79e8 100644
--- a/deps/v8/src/fast-accessor-assembler.h
+++ b/deps/v8/src/fast-accessor-assembler.h
@@ -11,7 +11,7 @@
#include "include/v8-experimental.h"
#include "src/base/macros.h"
-#include "src/handles.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -19,7 +19,8 @@ namespace internal {
class Code;
class CodeStubAssembler;
class Isolate;
-class Zone;
+template <typename T>
+class MaybeHandle;
namespace compiler {
class Node;
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index b194751a54..45c2cd2601 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -13,15 +13,14 @@ namespace v8 {
namespace internal {
template <typename Derived>
-FeedbackVectorSlot FeedbackVectorSpecBase<Derived>::AddSlot(
- FeedbackVectorSlotKind kind) {
+FeedbackSlot FeedbackVectorSpecBase<Derived>::AddSlot(FeedbackSlotKind kind) {
int slot = This()->slots();
int entries_per_slot = FeedbackMetadata::GetSlotSize(kind);
This()->append(kind);
for (int i = 1; i < entries_per_slot; i++) {
- This()->append(FeedbackVectorSlotKind::INVALID);
+ This()->append(FeedbackSlotKind::kInvalid);
}
- return FeedbackVectorSlot(slot);
+ return FeedbackSlot(slot);
}
// static
@@ -47,43 +46,35 @@ FeedbackVector* FeedbackVector::cast(Object* obj) {
return reinterpret_cast<FeedbackVector*>(obj);
}
-
-int FeedbackMetadata::GetSlotSize(FeedbackVectorSlotKind kind) {
- DCHECK_NE(FeedbackVectorSlotKind::INVALID, kind);
- DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, kind);
- if (kind == FeedbackVectorSlotKind::GENERAL ||
- kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC ||
- kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
- kind == FeedbackVectorSlotKind::CREATE_CLOSURE) {
- return 1;
- }
-
- return 2;
-}
-
-bool FeedbackMetadata::SlotRequiresParameter(FeedbackVectorSlotKind kind) {
+int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
switch (kind) {
- case FeedbackVectorSlotKind::CREATE_CLOSURE:
- return true;
-
- case FeedbackVectorSlotKind::CALL_IC:
- case FeedbackVectorSlotKind::LOAD_IC:
- case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
- case FeedbackVectorSlotKind::KEYED_LOAD_IC:
- case FeedbackVectorSlotKind::STORE_IC:
- case FeedbackVectorSlotKind::KEYED_STORE_IC:
- case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
- case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC:
- case FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC:
- case FeedbackVectorSlotKind::GENERAL:
- case FeedbackVectorSlotKind::INVALID:
- return false;
-
- case FeedbackVectorSlotKind::KINDS_NUMBER:
+ case FeedbackSlotKind::kGeneral:
+ case FeedbackSlotKind::kCompareOp:
+ case FeedbackSlotKind::kBinaryOp:
+ case FeedbackSlotKind::kToBoolean:
+ case FeedbackSlotKind::kLiteral:
+ case FeedbackSlotKind::kCreateClosure:
+ return 1;
+
+ case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ return 2;
+
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
break;
}
- UNREACHABLE();
- return false;
+ return 1;
}
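
Slot kinds now occupy one or two vector elements, and GetSlotSize is what lets the metadata iterator step over them. A toy version of that variable-width iteration:

#include <cstdio>
#include <vector>

enum class Kind { kBinaryOp /* 1 element */, kCall /* 2 elements */ };

int SlotSize(Kind k) { return k == Kind::kCall ? 2 : 1; }

int main() {
  std::vector<Kind> kinds = {Kind::kCall, Kind::kBinaryOp, Kind::kCall};
  int index = 0;
  for (Kind k : kinds) {
    std::printf("slot starts at element %d, width %d\n", index, SlotSize(k));
    index += SlotSize(k);  // advance by the kind's width, as in Next()
  }
}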
bool FeedbackVector::is_empty() const {
@@ -95,25 +86,33 @@ int FeedbackVector::slot_count() const {
}
FeedbackMetadata* FeedbackVector::metadata() const {
- return FeedbackMetadata::cast(get(kMetadataIndex));
+ return shared_function_info()->feedback_metadata();
+}
+
+SharedFunctionInfo* FeedbackVector::shared_function_info() const {
+ return SharedFunctionInfo::cast(get(kSharedFunctionInfoIndex));
}
int FeedbackVector::invocation_count() const {
return Smi::cast(get(kInvocationCountIndex))->value();
}
+void FeedbackVector::clear_invocation_count() {
+ set(kInvocationCountIndex, Smi::kZero);
+}
+
// Conversion from an integer index to either a slot or an ic slot.
// static
-FeedbackVectorSlot FeedbackVector::ToSlot(int index) {
+FeedbackSlot FeedbackVector::ToSlot(int index) {
DCHECK_GE(index, kReservedIndexCount);
- return FeedbackVectorSlot(index - kReservedIndexCount);
+ return FeedbackSlot(index - kReservedIndexCount);
}
-Object* FeedbackVector::Get(FeedbackVectorSlot slot) const {
+Object* FeedbackVector::Get(FeedbackSlot slot) const {
return get(GetIndex(slot));
}
-void FeedbackVector::Set(FeedbackVectorSlot slot, Object* value,
+void FeedbackVector::Set(FeedbackSlot slot, Object* value,
WriteBarrierMode mode) {
set(GetIndex(slot), value, mode);
}
@@ -153,6 +152,8 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
return CompareOperationHint::kInternalizedString;
case CompareOperationFeedback::kString:
return CompareOperationHint::kString;
+ case CompareOperationFeedback::kReceiver:
+ return CompareOperationHint::kReceiver;
default:
return CompareOperationHint::kAny;
}
@@ -170,61 +171,74 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
int total = 0;
FeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
- FeedbackVectorSlot slot = iter.Next();
- FeedbackVectorSlotKind kind = iter.kind();
+ FeedbackSlot slot = iter.Next();
+ FeedbackSlotKind kind = iter.kind();
Object* const obj = Get(slot);
switch (kind) {
- case FeedbackVectorSlotKind::CALL_IC:
- case FeedbackVectorSlotKind::LOAD_IC:
- case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
- case FeedbackVectorSlotKind::KEYED_LOAD_IC:
- case FeedbackVectorSlotKind::STORE_IC:
- case FeedbackVectorSlotKind::KEYED_STORE_IC:
- case FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC: {
+ case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
with++;
} else if (obj == megamorphic_sentinel) {
gen++;
+ if (code_is_interpreted) with++;
}
total++;
break;
}
- case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
- case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+ case FeedbackSlotKind::kBinaryOp:
// If we are not running interpreted code, we need to ignore the special
// IC slots for binaryop/compare used by the interpreter.
// TODO(mvstanton): Remove code_is_interpreted when full code is retired
// from service.
if (code_is_interpreted) {
int const feedback = Smi::cast(obj)->value();
- if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
- CompareOperationHint hint =
- CompareOperationHintFromFeedback(feedback);
- if (hint == CompareOperationHint::kAny) {
- gen++;
- } else if (hint != CompareOperationHint::kNone) {
- with++;
- }
- } else {
- DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC, kind);
- BinaryOperationHint hint =
- BinaryOperationHintFromFeedback(feedback);
- if (hint == BinaryOperationHint::kAny) {
- gen++;
- } else if (hint != BinaryOperationHint::kNone) {
- with++;
- }
+ BinaryOperationHint hint = BinaryOperationHintFromFeedback(feedback);
+ if (hint == BinaryOperationHint::kAny) {
+ gen++;
+ }
+ if (hint != BinaryOperationHint::kNone) {
+ with++;
+ }
+ total++;
+ }
+ break;
+ case FeedbackSlotKind::kCompareOp: {
+ // If we are not running interpreted code, we need to ignore the special
+ // IC slots for binaryop/compare used by the interpreter.
+ // TODO(mvstanton): Remove code_is_interpreted when full code is retired
+ // from service.
+ if (code_is_interpreted) {
+ int const feedback = Smi::cast(obj)->value();
+ CompareOperationHint hint =
+ CompareOperationHintFromFeedback(feedback);
+ if (hint == CompareOperationHint::kAny) {
+ gen++;
+ }
+ if (hint != CompareOperationHint::kNone) {
+ with++;
}
total++;
}
break;
}
- case FeedbackVectorSlotKind::CREATE_CLOSURE:
- case FeedbackVectorSlotKind::GENERAL:
+ case FeedbackSlotKind::kToBoolean:
+ case FeedbackSlotKind::kCreateClosure:
+ case FeedbackSlotKind::kGeneral:
+ case FeedbackSlotKind::kLiteral:
break;
- case FeedbackVectorSlotKind::INVALID:
- case FeedbackVectorSlotKind::KINDS_NUMBER:
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
break;
}
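
The counting rule after the binaryop/compare split, as read from both cases: a hint of kAny counts as generic, any hint other than kNone counts as carrying type info (so kAny bumps both), and every visited slot bumps the total. Distilled into a standalone helper with an illustrative hint enum:

enum class Hint { kNone, kSignedSmall, kAny };

struct Counts { int with = 0, gen = 0, total = 0; };

void Tally(Hint hint, Counts* c) {
  if (hint == Hint::kAny) c->gen++;    // megamorphic counts as generic...
  if (hint != Hint::kNone) c->with++;  // ...and also as having type info
  c->total++;
}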
@@ -255,11 +269,11 @@ bool FeedbackMetadataIterator::HasNext() const {
return next_slot_.ToInt() < metadata()->slot_count();
}
-FeedbackVectorSlot FeedbackMetadataIterator::Next() {
+FeedbackSlot FeedbackMetadataIterator::Next() {
DCHECK(HasNext());
cur_slot_ = next_slot_;
slot_kind_ = metadata()->GetKind(cur_slot_);
- next_slot_ = FeedbackVectorSlot(next_slot_.ToInt() + entry_size());
+ next_slot_ = FeedbackSlot(next_slot_.ToInt() + entry_size());
return cur_slot_;
}
@@ -271,7 +285,7 @@ Object* FeedbackNexus::GetFeedback() const { return vector()->Get(slot()); }
Object* FeedbackNexus::GetFeedbackExtra() const {
#ifdef DEBUG
- FeedbackVectorSlotKind kind = vector()->GetKind(slot());
+ FeedbackSlotKind kind = vector()->GetKind(slot());
DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
#endif
int extra_index = vector()->GetIndex(slot()) + 1;
@@ -285,7 +299,7 @@ void FeedbackNexus::SetFeedback(Object* feedback, WriteBarrierMode mode) {
void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
WriteBarrierMode mode) {
#ifdef DEBUG
- FeedbackVectorSlotKind kind = vector()->GetKind(slot());
+ FeedbackSlotKind kind = vector()->GetKind(slot());
DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
#endif
int index = vector()->GetIndex(slot()) + 1;
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index 795cfb446f..4003068e9b 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -22,24 +22,17 @@ static bool IsPropertyNameFeedback(Object* feedback) {
symbol != heap->megamorphic_symbol();
}
-std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind) {
+std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind) {
return os << FeedbackMetadata::Kind2String(kind);
}
-FeedbackVectorSlotKind FeedbackMetadata::GetKind(
- FeedbackVectorSlot slot) const {
+FeedbackSlotKind FeedbackMetadata::GetKind(FeedbackSlot slot) const {
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
int data = Smi::cast(get(index))->value();
return VectorICComputer::decode(data, slot.ToInt());
}
-int FeedbackMetadata::GetParameter(int parameter_index) const {
- FixedArray* parameters = FixedArray::cast(get(kParametersTableIndex));
- return Smi::cast(parameters->get(parameter_index))->value();
-}
-
-void FeedbackMetadata::SetKind(FeedbackVectorSlot slot,
- FeedbackVectorSlotKind kind) {
+void FeedbackMetadata::SetKind(FeedbackSlot slot, FeedbackSlotKind kind) {
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
int data = Smi::cast(get(index))->value();
int new_data = VectorICComputer::encode(data, slot.ToInt(), kind);
@@ -65,11 +58,11 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
}
#ifdef DEBUG
for (int i = 0; i < slot_count;) {
- FeedbackVectorSlotKind kind = spec->GetKind(i);
+ FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i));
int entry_size = FeedbackMetadata::GetSlotSize(kind);
for (int j = 1; j < entry_size; j++) {
- FeedbackVectorSlotKind kind = spec->GetKind(i + j);
- DCHECK_EQ(FeedbackVectorSlotKind::INVALID, kind);
+ FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i + j));
+ DCHECK_EQ(FeedbackSlotKind::kInvalid, kind);
}
i += entry_size;
}
@@ -85,20 +78,9 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
Handle<FeedbackMetadata> metadata = Handle<FeedbackMetadata>::cast(array);
for (int i = 0; i < slot_count; i++) {
- FeedbackVectorSlotKind kind = spec->GetKind(i);
- metadata->SetKind(FeedbackVectorSlot(i), kind);
- }
-
- if (spec->parameters_count() > 0) {
- const int parameters_count = spec->parameters_count();
- Handle<FixedArray> params_array =
- factory->NewFixedArray(parameters_count, TENURED);
- for (int i = 0; i < parameters_count; i++) {
- params_array->set(i, Smi::FromInt(spec->GetParameter(i)));
- }
- metadata->set(kParametersTableIndex, *params_array);
- } else {
- metadata->set(kParametersTableIndex, *factory->empty_fixed_array());
+ FeedbackSlot slot(i);
+ FeedbackSlotKind kind = spec->GetKind(slot);
+ metadata->SetKind(slot, kind);
}
// It's important that the FeedbackMetadata have a COW map, since it's
@@ -118,262 +100,245 @@ bool FeedbackMetadata::SpecDiffersFrom(
}
int slots = slot_count();
- int parameter_index = 0;
for (int i = 0; i < slots;) {
- FeedbackVectorSlot slot(i);
- FeedbackVectorSlotKind kind = GetKind(slot);
+ FeedbackSlot slot(i);
+ FeedbackSlotKind kind = GetKind(slot);
int entry_size = FeedbackMetadata::GetSlotSize(kind);
- if (kind != other_spec->GetKind(i)) {
+ if (kind != other_spec->GetKind(slot)) {
return true;
}
- if (SlotRequiresParameter(kind)) {
- int parameter = GetParameter(parameter_index);
- int other_parameter = other_spec->GetParameter(parameter_index);
- if (parameter != other_parameter) {
- return true;
- }
- parameter_index++;
- }
i += entry_size;
}
return false;
}
-bool FeedbackMetadata::DiffersFrom(
- const FeedbackMetadata* other_metadata) const {
- if (other_metadata->slot_count() != slot_count()) {
- return true;
- }
-
- int slots = slot_count();
- int parameter_index = 0;
- for (int i = 0; i < slots;) {
- FeedbackVectorSlot slot(i);
- FeedbackVectorSlotKind kind = GetKind(slot);
- int entry_size = FeedbackMetadata::GetSlotSize(kind);
- if (GetKind(slot) != other_metadata->GetKind(slot)) {
- return true;
- }
- if (SlotRequiresParameter(kind)) {
- if (GetParameter(parameter_index) !=
- other_metadata->GetParameter(parameter_index)) {
- return true;
- }
- parameter_index++;
- }
- i += entry_size;
- }
- return false;
-}
-
-const char* FeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
+const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
switch (kind) {
- case FeedbackVectorSlotKind::INVALID:
+ case FeedbackSlotKind::kInvalid:
return "INVALID";
- case FeedbackVectorSlotKind::CALL_IC:
+ case FeedbackSlotKind::kCall:
return "CALL_IC";
- case FeedbackVectorSlotKind::LOAD_IC:
+ case FeedbackSlotKind::kLoadProperty:
return "LOAD_IC";
- case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
- return "LOAD_GLOBAL_IC";
- case FeedbackVectorSlotKind::KEYED_LOAD_IC:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ return "LOAD_GLOBAL_INSIDE_TYPEOF_IC";
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ return "LOAD_GLOBAL_NOT_INSIDE_TYPEOF_IC";
+ case FeedbackSlotKind::kLoadKeyed:
return "KEYED_LOAD_IC";
- case FeedbackVectorSlotKind::STORE_IC:
- return "STORE_IC";
- case FeedbackVectorSlotKind::KEYED_STORE_IC:
- return "KEYED_STORE_IC";
- case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ return "STORE_SLOPPY_IC";
+ case FeedbackSlotKind::kStoreNamedStrict:
+ return "STORE_STRICT_IC";
+ case FeedbackSlotKind::kStoreOwnNamed:
+ return "STORE_OWN_IC";
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ return "KEYED_STORE_SLOPPY_IC";
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ return "KEYED_STORE_STRICT_IC";
+ case FeedbackSlotKind::kBinaryOp:
return "INTERPRETER_BINARYOP_IC";
- case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC:
+ case FeedbackSlotKind::kCompareOp:
return "INTERPRETER_COMPARE_IC";
- case FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC:
+ case FeedbackSlotKind::kToBoolean:
+ return "TO_BOOLEAN_IC";
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
return "STORE_DATA_PROPERTY_IN_LITERAL_IC";
- case FeedbackVectorSlotKind::CREATE_CLOSURE:
- return "CREATE_CLOSURE";
- case FeedbackVectorSlotKind::GENERAL:
+ case FeedbackSlotKind::kCreateClosure:
+ return "kCreateClosure";
+ case FeedbackSlotKind::kLiteral:
+ return "LITERAL";
+ case FeedbackSlotKind::kGeneral:
return "STUB";
- case FeedbackVectorSlotKind::KINDS_NUMBER:
+ case FeedbackSlotKind::kKindsNumber:
break;
}
UNREACHABLE();
return "?";
}
-FeedbackVectorSlotKind FeedbackVector::GetKind(FeedbackVectorSlot slot) const {
+FeedbackSlotKind FeedbackVector::GetKind(FeedbackSlot slot) const {
DCHECK(!is_empty());
return metadata()->GetKind(slot);
}
-int FeedbackVector::GetParameter(FeedbackVectorSlot slot) const {
- DCHECK(!is_empty());
- DCHECK(
- FeedbackMetadata::SlotRequiresParameter(metadata()->GetKind(slot)));
- return FixedArray::cast(Get(slot))->length();
-}
-
// static
Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
- Handle<FeedbackMetadata> metadata) {
+ Handle<SharedFunctionInfo> shared) {
Factory* factory = isolate->factory();
- const int slot_count = metadata->slot_count();
+ const int slot_count = shared->feedback_metadata()->slot_count();
const int length = slot_count + kReservedIndexCount;
- if (length == kReservedIndexCount) {
- return Handle<FeedbackVector>::cast(factory->empty_feedback_vector());
- }
Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
array->set_map_no_write_barrier(isolate->heap()->feedback_vector_map());
- array->set(kMetadataIndex, *metadata);
+ array->set(kSharedFunctionInfoIndex, *shared);
array->set(kInvocationCountIndex, Smi::kZero);
- int parameter_index = 0;
- for (int i = 0; i < slot_count;) {
- FeedbackVectorSlot slot(i);
- FeedbackVectorSlotKind kind = metadata->GetKind(slot);
- int index = FeedbackVector::GetIndex(slot);
- int entry_size = FeedbackMetadata::GetSlotSize(kind);
-
- if (kind == FeedbackVectorSlotKind::CREATE_CLOSURE) {
- // This fixed array is filled with undefined.
- int length = metadata->GetParameter(parameter_index++);
- if (length == 0) {
- // This is a native function literal. We can always point to
- // the empty literals array here.
- array->set(index, *factory->empty_literals_array(), SKIP_WRITE_BARRIER);
- } else {
- // TODO(mvstanton): Create the array.
- // Handle<FixedArray> value = factory->NewFixedArray(length);
- // array->set(index, *value);
- array->set(index, *factory->empty_literals_array(), SKIP_WRITE_BARRIER);
- }
- }
- i += entry_size;
- }
-
- DisallowHeapAllocation no_gc;
// Ensure we can skip the write barrier
Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
DCHECK_EQ(isolate->heap()->uninitialized_symbol(), *uninitialized_sentinel);
+ Handle<Oddball> undefined_value = factory->undefined_value();
for (int i = 0; i < slot_count;) {
- FeedbackVectorSlot slot(i);
- FeedbackVectorSlotKind kind = metadata->GetKind(slot);
+ FeedbackSlot slot(i);
+ FeedbackSlotKind kind = shared->feedback_metadata()->GetKind(slot);
int index = FeedbackVector::GetIndex(slot);
int entry_size = FeedbackMetadata::GetSlotSize(kind);
- Object* value;
- if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
- value = isolate->heap()->empty_weak_cell();
- } else if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
- kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
- value = Smi::kZero;
- } else {
- value = *uninitialized_sentinel;
- }
-
- if (kind != FeedbackVectorSlotKind::CREATE_CLOSURE) {
- array->set(index, value, SKIP_WRITE_BARRIER);
- value = kind == FeedbackVectorSlotKind::CALL_IC ? Smi::kZero
- : *uninitialized_sentinel;
- for (int j = 1; j < entry_size; j++) {
- array->set(index + j, value, SKIP_WRITE_BARRIER);
+ Object* extra_value = *uninitialized_sentinel;
+ switch (kind) {
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ array->set(index, isolate->heap()->empty_weak_cell(),
+ SKIP_WRITE_BARRIER);
+ break;
+ case FeedbackSlotKind::kCompareOp:
+ case FeedbackSlotKind::kBinaryOp:
+ case FeedbackSlotKind::kToBoolean:
+ array->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
+ break;
+ case FeedbackSlotKind::kCreateClosure: {
+ Handle<Cell> cell = factory->NewNoClosuresCell(undefined_value);
+ array->set(index, *cell);
+ break;
}
+ case FeedbackSlotKind::kLiteral:
+ array->set(index, *undefined_value, SKIP_WRITE_BARRIER);
+ break;
+ case FeedbackSlotKind::kCall:
+ array->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ extra_value = Smi::kZero;
+ break;
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ case FeedbackSlotKind::kGeneral:
+ array->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ break;
+
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ array->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
+ break;
+ }
+ for (int j = 1; j < entry_size; j++) {
+ array->set(index + j, extra_value, SKIP_WRITE_BARRIER);
}
i += entry_size;
}
- return Handle<FeedbackVector>::cast(array);
-}
-
-// static
-int FeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec* spec,
- FeedbackVectorSlot slot) {
- return kReservedIndexCount + slot.ToInt();
+ Handle<FeedbackVector> result = Handle<FeedbackVector>::cast(array);
+ if (isolate->IsCodeCoverageEnabled()) AddToCodeCoverageList(isolate, result);
+ return result;
}
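
The per-kind initial values in the rewritten FeedbackVector::New, distilled into a standalone table over a toy subset of kinds (toy enums standing in for V8 heap values; kCompareOp and kToBoolean behave like kBinaryOp, and kCall's second element additionally starts as Smi zero for the call count):

enum class SlotKind { kLoadGlobal, kBinaryOp, kCreateClosure, kCall, kGeneral };
enum class InitValue {
  kEmptyWeakCell,          // load-global slots
  kSmiZero,                // binaryop/compareop/toboolean slots
  kFreshNoClosuresCell,    // create-closure slots
  kUninitializedSentinel   // everything else
};

InitValue InitialMainValue(SlotKind kind) {
  switch (kind) {
    case SlotKind::kLoadGlobal:    return InitValue::kEmptyWeakCell;
    case SlotKind::kBinaryOp:      return InitValue::kSmiZero;
    case SlotKind::kCreateClosure: return InitValue::kFreshNoClosuresCell;
    case SlotKind::kCall:          // extra element gets Smi zero (call count)
    case SlotKind::kGeneral:       return InitValue::kUninitializedSentinel;
  }
  return InitValue::kUninitializedSentinel;
}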
-
// static
Handle<FeedbackVector> FeedbackVector::Copy(Isolate* isolate,
Handle<FeedbackVector> vector) {
Handle<FeedbackVector> result;
result = Handle<FeedbackVector>::cast(
isolate->factory()->CopyFixedArray(Handle<FixedArray>::cast(vector)));
+ if (isolate->IsCodeCoverageEnabled()) AddToCodeCoverageList(isolate, result);
return result;
}
-
-// This logic is copied from
-// StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget.
-static bool ClearLogic(Isolate* isolate) {
- return FLAG_cleanup_code_caches_at_gc && isolate->serializer_enabled();
+// static
+void FeedbackVector::AddToCodeCoverageList(Isolate* isolate,
+ Handle<FeedbackVector> vector) {
+ DCHECK(isolate->IsCodeCoverageEnabled());
+ if (!vector->shared_function_info()->IsSubjectToDebugging()) return;
+ Handle<ArrayList> list =
+ Handle<ArrayList>::cast(isolate->factory()->code_coverage_list());
+ list = ArrayList::Add(list, vector);
+ isolate->SetCodeCoverageList(*list);
}
-
-void FeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
- bool force_clear) {
+void FeedbackVector::ClearSlots(JSFunction* host_function) {
Isolate* isolate = GetIsolate();
- if (!force_clear && !ClearLogic(isolate)) return;
-
- if (this == isolate->heap()->empty_feedback_vector()) return;
Object* uninitialized_sentinel =
FeedbackVector::RawUninitializedSentinel(isolate);
+ Oddball* undefined_value = isolate->heap()->undefined_value();
+ bool feedback_updated = false;
FeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
- FeedbackVectorSlot slot = iter.Next();
- FeedbackVectorSlotKind kind = iter.kind();
+ FeedbackSlot slot = iter.Next();
+ FeedbackSlotKind kind = iter.kind();
Object* obj = Get(slot);
if (obj != uninitialized_sentinel) {
switch (kind) {
- case FeedbackVectorSlotKind::CALL_IC: {
+ case FeedbackSlotKind::kCall: {
CallICNexus nexus(this, slot);
- nexus.Clear(shared->code());
+ if (!nexus.IsCleared()) {
+ nexus.Clear();
+ feedback_updated = true;
+ }
break;
}
- case FeedbackVectorSlotKind::LOAD_IC: {
+ case FeedbackSlotKind::kLoadProperty: {
LoadICNexus nexus(this, slot);
- nexus.Clear(shared->code());
+ if (!nexus.IsCleared()) {
+ nexus.Clear();
+ feedback_updated = true;
+ }
break;
}
- case FeedbackVectorSlotKind::LOAD_GLOBAL_IC: {
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
LoadGlobalICNexus nexus(this, slot);
- nexus.Clear(shared->code());
+ if (!nexus.IsCleared()) {
+ nexus.Clear();
+ feedback_updated = true;
+ }
break;
}
- case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
+ case FeedbackSlotKind::kLoadKeyed: {
KeyedLoadICNexus nexus(this, slot);
- nexus.Clear(shared->code());
+ if (!nexus.IsCleared()) {
+ nexus.Clear();
+ feedback_updated = true;
+ }
break;
}
- case FeedbackVectorSlotKind::STORE_IC: {
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed: {
StoreICNexus nexus(this, slot);
- nexus.Clear(shared->code());
+ if (!nexus.IsCleared()) {
+ nexus.Clear();
+ feedback_updated = true;
+ }
break;
}
- case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict: {
KeyedStoreICNexus nexus(this, slot);
- nexus.Clear(shared->code());
+ if (!nexus.IsCleared()) {
+ nexus.Clear();
+ feedback_updated = true;
+ }
break;
}
- case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
- case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+ case FeedbackSlotKind::kBinaryOp:
+ case FeedbackSlotKind::kCompareOp: {
DCHECK(Get(slot)->IsSmi());
// Don't clear these Smi slots.
// Set(slot, Smi::kZero);
break;
}
- case FeedbackVectorSlotKind::CREATE_CLOSURE: {
- // Fill the array with undefined.
- FixedArray* array = FixedArray::cast(Get(slot));
- for (int i = 1; i < array->length(); i++) {
- array->set_undefined(i);
- }
+ case FeedbackSlotKind::kCreateClosure: {
break;
}
- case FeedbackVectorSlotKind::GENERAL: {
+ case FeedbackSlotKind::kGeneral: {
if (obj->IsHeapObject()) {
InstanceType instance_type =
HeapObject::cast(obj)->map()->instance_type();
@@ -382,31 +347,37 @@ void FeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
// regularly.
if (instance_type != ALLOCATION_SITE_TYPE) {
Set(slot, uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ feedback_updated = true;
}
}
break;
}
- case FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC: {
+ case FeedbackSlotKind::kLiteral: {
+ Set(slot, undefined_value, SKIP_WRITE_BARRIER);
+ feedback_updated = true;
+ break;
+ }
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
StoreDataPropertyInLiteralICNexus nexus(this, slot);
- nexus.Clear(shared->code());
+ if (!nexus.IsCleared()) {
+ nexus.Clear();
+ feedback_updated = true;
+ }
break;
}
- case FeedbackVectorSlotKind::INVALID:
- case FeedbackVectorSlotKind::KINDS_NUMBER:
+ case FeedbackSlotKind::kToBoolean:
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
break;
}
}
}
+ if (feedback_updated) {
+ IC::OnFeedbackChanged(isolate, host_function);
+ }
}
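
The rewritten ClearSlots follows a clear-lazily, notify-once pattern: each nexus is cleared only if it is not already cleared, and IC::OnFeedbackChanged fires a single time via the feedback_updated flag. A minimal sketch:

struct NexusModel {
  bool cleared = false;
  bool IsCleared() const { return cleared; }
  void Clear() { cleared = true; }
};

template <typename Notify>
void ClearAllSlots(NexusModel* slots, int n, Notify on_feedback_changed) {
  bool feedback_updated = false;
  for (int i = 0; i < n; ++i) {
    if (!slots[i].IsCleared()) {  // skip already-cleared slots
      slots[i].Clear();
      feedback_updated = true;
    }
  }
  if (feedback_updated) on_feedback_changed();  // single notification
}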
-
-// static
-Handle<FeedbackVector> FeedbackVector::DummyVector(Isolate* isolate) {
- return isolate->factory()->dummy_vector();
-}
-
-
Handle<FixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
Handle<Object> feedback = handle(GetFeedback(), isolate);
@@ -459,8 +430,8 @@ void FeedbackNexus::ConfigurePremonomorphic() {
void FeedbackNexus::ConfigureMegamorphic() {
// Keyed ICs must use ConfigureMegamorphicKeyed.
- DCHECK_NE(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector()->GetKind(slot()));
- DCHECK_NE(FeedbackVectorSlotKind::KEYED_STORE_IC, vector()->GetKind(slot()));
+ DCHECK(!vector()->IsKeyedLoadIC(slot()));
+ DCHECK(!vector()->IsKeyedStoreIC(slot()));
Isolate* isolate = GetIsolate();
SetFeedback(*FeedbackVector::MegamorphicSentinel(isolate),
@@ -623,8 +594,6 @@ float CallICNexus::ComputeCallFrequency() {
return static_cast<float>(call_count / invocation_count);
}
-void CallICNexus::Clear(Code* host) { CallIC::Clear(GetIsolate(), host, this); }
-
void CallICNexus::ConfigureUninitialized() {
Isolate* isolate = GetIsolate();
SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
@@ -929,16 +898,6 @@ bool FeedbackNexus::FindHandlers(List<Handle<Object>>* code_list,
return count == length;
}
-void LoadICNexus::Clear(Code* host) { LoadIC::Clear(GetIsolate(), host, this); }
-
-void LoadGlobalICNexus::Clear(Code* host) {
- LoadGlobalIC::Clear(GetIsolate(), host, this);
-}
-
-void KeyedLoadICNexus::Clear(Code* host) {
- KeyedLoadIC::Clear(GetIsolate(), host, this);
-}
-
Name* KeyedLoadICNexus::FindFirstName() const {
Object* feedback = GetFeedback();
if (IsPropertyNameFeedback(feedback)) {
@@ -955,14 +914,6 @@ Name* KeyedStoreICNexus::FindFirstName() const {
return NULL;
}
-void StoreICNexus::Clear(Code* host) {
- StoreIC::Clear(GetIsolate(), host, this);
-}
-
-void KeyedStoreICNexus::Clear(Code* host) {
- KeyedStoreIC::Clear(GetIsolate(), host, this);
-}
-
KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
KeyedAccessStoreMode mode = STANDARD_STORE;
MapHandleList maps;
@@ -986,7 +937,7 @@ KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
major_key == CodeStub::StoreFastElement ||
- major_key == CodeStub::StoreElement ||
+ major_key == CodeStub::StoreSlowElement ||
major_key == CodeStub::ElementsTransitionAndStore ||
major_key == CodeStub::NoCache);
if (major_key != CodeStub::NoCache) {
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index e7b51b1efd..9ac146d511 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -16,80 +16,138 @@
namespace v8 {
namespace internal {
-enum class FeedbackVectorSlotKind {
+enum class FeedbackSlotKind {
  // This kind means that the slot points to the middle of another slot
  // which occupies more than one feedback vector element.
// There must be no such slots in the system.
- INVALID,
-
- CALL_IC,
- LOAD_IC,
- LOAD_GLOBAL_IC,
- KEYED_LOAD_IC,
- STORE_IC,
- KEYED_STORE_IC,
- INTERPRETER_BINARYOP_IC,
- INTERPRETER_COMPARE_IC,
- STORE_DATA_PROPERTY_IN_LITERAL_IC,
-
- // This kind of slot has an integer parameter associated with it.
- CREATE_CLOSURE,
+ kInvalid,
+
+ kCall,
+ kLoadProperty,
+ kLoadGlobalNotInsideTypeof,
+ kLoadGlobalInsideTypeof,
+ kLoadKeyed,
+ kStoreNamedSloppy,
+ kStoreNamedStrict,
+ kStoreOwnNamed,
+ kStoreKeyedSloppy,
+ kStoreKeyedStrict,
+ kBinaryOp,
+ kCompareOp,
+ kToBoolean,
+ kStoreDataPropertyInLiteral,
+ kCreateClosure,
+ kLiteral,
  // This is a general-purpose slot that occupies one feedback vector element.
- GENERAL,
+ kGeneral,
- KINDS_NUMBER // Last value indicating number of kinds.
+ kKindsNumber // Last value indicating number of kinds.
};
-std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind);
+inline bool IsCallICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kCall;
+}
+
+inline bool IsLoadICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kLoadProperty;
+}
+
+inline bool IsLoadGlobalICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kLoadGlobalNotInsideTypeof ||
+ kind == FeedbackSlotKind::kLoadGlobalInsideTypeof;
+}
+
+inline bool IsKeyedLoadICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kLoadKeyed;
+}
+
+inline bool IsStoreICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreNamedSloppy ||
+ kind == FeedbackSlotKind::kStoreNamedStrict;
+}
+
+inline bool IsStoreOwnICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreOwnNamed;
+}
+
+inline bool IsKeyedStoreICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreKeyedSloppy ||
+ kind == FeedbackSlotKind::kStoreKeyedStrict;
+}
+
+inline TypeofMode GetTypeofModeFromSlotKind(FeedbackSlotKind kind) {
+ DCHECK(IsLoadGlobalICKind(kind));
+ return (kind == FeedbackSlotKind::kLoadGlobalInsideTypeof)
+ ? INSIDE_TYPEOF
+ : NOT_INSIDE_TYPEOF;
+}
+
+inline LanguageMode GetLanguageModeFromSlotKind(FeedbackSlotKind kind) {
+ DCHECK(IsStoreICKind(kind) || IsStoreOwnICKind(kind) ||
+ IsKeyedStoreICKind(kind));
+ return (kind == FeedbackSlotKind::kStoreNamedSloppy ||
+ kind == FeedbackSlotKind::kStoreKeyedSloppy)
+ ? SLOPPY
+ : STRICT;
+}
+
+std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind);
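
Illustrative sketch (not part of the patch): typeof mode and language mode are now encoded in the slot kind itself, so they round-trip without the old per-slot parameter table. Assuming a Zone* named zone and the Add*Slot helpers defined below:

    FeedbackVectorSpec spec(zone);
    FeedbackSlot slot = spec.AddStoreICSlot(STRICT);
    // The kind records the language mode directly...
    DCHECK_EQ(FeedbackSlotKind::kStoreNamedStrict, spec.GetKind(slot));
    // ...and it can be decoded back without any extra storage.
    DCHECK_EQ(STRICT, GetLanguageModeFromSlotKind(spec.GetKind(slot)));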
template <typename Derived>
class FeedbackVectorSpecBase {
public:
- inline FeedbackVectorSlot AddSlot(FeedbackVectorSlotKind kind);
+ FeedbackSlot AddCallICSlot() { return AddSlot(FeedbackSlotKind::kCall); }
- FeedbackVectorSlot AddCallICSlot() {
- return AddSlot(FeedbackVectorSlotKind::CALL_IC);
+ FeedbackSlot AddLoadICSlot() {
+ return AddSlot(FeedbackSlotKind::kLoadProperty);
}
- FeedbackVectorSlot AddLoadICSlot() {
- return AddSlot(FeedbackVectorSlotKind::LOAD_IC);
+ FeedbackSlot AddLoadGlobalICSlot(TypeofMode typeof_mode) {
+ return AddSlot(typeof_mode == INSIDE_TYPEOF
+ ? FeedbackSlotKind::kLoadGlobalInsideTypeof
+ : FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
}
- FeedbackVectorSlot AddLoadGlobalICSlot() {
- return AddSlot(FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
+ FeedbackSlot AddCreateClosureSlot() {
+ return AddSlot(FeedbackSlotKind::kCreateClosure);
}
- FeedbackVectorSlot AddCreateClosureSlot(int size) {
- This()->append_parameter(size);
- return AddSlot(FeedbackVectorSlotKind::CREATE_CLOSURE);
+ FeedbackSlot AddKeyedLoadICSlot() {
+ return AddSlot(FeedbackSlotKind::kLoadKeyed);
}
- FeedbackVectorSlot AddKeyedLoadICSlot() {
- return AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
+ FeedbackSlot AddStoreICSlot(LanguageMode language_mode) {
+ STATIC_ASSERT(LANGUAGE_END == 2);
+ return AddSlot(is_strict(language_mode)
+ ? FeedbackSlotKind::kStoreNamedStrict
+ : FeedbackSlotKind::kStoreNamedSloppy);
}
- FeedbackVectorSlot AddStoreICSlot() {
- return AddSlot(FeedbackVectorSlotKind::STORE_IC);
+ FeedbackSlot AddStoreOwnICSlot() {
+ return AddSlot(FeedbackSlotKind::kStoreOwnNamed);
}
- FeedbackVectorSlot AddKeyedStoreICSlot() {
- return AddSlot(FeedbackVectorSlotKind::KEYED_STORE_IC);
+ FeedbackSlot AddKeyedStoreICSlot(LanguageMode language_mode) {
+ STATIC_ASSERT(LANGUAGE_END == 2);
+ return AddSlot(is_strict(language_mode)
+ ? FeedbackSlotKind::kStoreKeyedStrict
+ : FeedbackSlotKind::kStoreKeyedSloppy);
}
- FeedbackVectorSlot AddInterpreterBinaryOpICSlot() {
- return AddSlot(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC);
+ FeedbackSlot AddInterpreterBinaryOpICSlot() {
+ return AddSlot(FeedbackSlotKind::kBinaryOp);
}
- FeedbackVectorSlot AddInterpreterCompareICSlot() {
- return AddSlot(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC);
+ FeedbackSlot AddInterpreterCompareICSlot() {
+ return AddSlot(FeedbackSlotKind::kCompareOp);
}
- FeedbackVectorSlot AddGeneralSlot() {
- return AddSlot(FeedbackVectorSlotKind::GENERAL);
- }
+ FeedbackSlot AddGeneralSlot() { return AddSlot(FeedbackSlotKind::kGeneral); }
+
+ FeedbackSlot AddLiteralSlot() { return AddSlot(FeedbackSlotKind::kLiteral); }
- FeedbackVectorSlot AddStoreDataPropertyInLiteralICSlot() {
- return AddSlot(FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC);
+ FeedbackSlot AddStoreDataPropertyInLiteralICSlot() {
+ return AddSlot(FeedbackSlotKind::kStoreDataPropertyInLiteral);
}
#ifdef OBJECT_PRINT
@@ -100,78 +158,57 @@ class FeedbackVectorSpecBase {
DECLARE_PRINTER(FeedbackVectorSpec)
private:
+ inline FeedbackSlot AddSlot(FeedbackSlotKind kind);
+
Derived* This() { return static_cast<Derived*>(this); }
};
class StaticFeedbackVectorSpec
: public FeedbackVectorSpecBase<StaticFeedbackVectorSpec> {
public:
- StaticFeedbackVectorSpec() : slot_count_(0), parameters_count_(0) {}
+ StaticFeedbackVectorSpec() : slot_count_(0) {}
int slots() const { return slot_count_; }
- FeedbackVectorSlotKind GetKind(int slot) const {
- DCHECK(slot >= 0 && slot < slot_count_);
- return kinds_[slot];
- }
-
- int parameters_count() const { return parameters_count_; }
-
- int GetParameter(int index) const {
- DCHECK(index >= 0 && index < parameters_count_);
- return parameters_[index];
+ FeedbackSlotKind GetKind(FeedbackSlot slot) const {
+ DCHECK(slot.ToInt() >= 0 && slot.ToInt() < slot_count_);
+ return kinds_[slot.ToInt()];
}
private:
friend class FeedbackVectorSpecBase<StaticFeedbackVectorSpec>;
- void append(FeedbackVectorSlotKind kind) {
+ void append(FeedbackSlotKind kind) {
DCHECK(slot_count_ < kMaxLength);
kinds_[slot_count_++] = kind;
}
- void append_parameter(int parameter) {
- DCHECK(parameters_count_ < kMaxLength);
- parameters_[parameters_count_++] = parameter;
- }
-
static const int kMaxLength = 12;
int slot_count_;
- FeedbackVectorSlotKind kinds_[kMaxLength];
- int parameters_count_;
- int parameters_[kMaxLength];
+ FeedbackSlotKind kinds_[kMaxLength];
};
class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
public:
- explicit FeedbackVectorSpec(Zone* zone)
- : slot_kinds_(zone), parameters_(zone) {
+ explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
slot_kinds_.reserve(16);
- parameters_.reserve(8);
}
int slots() const { return static_cast<int>(slot_kinds_.size()); }
- FeedbackVectorSlotKind GetKind(int slot) const {
- return static_cast<FeedbackVectorSlotKind>(slot_kinds_.at(slot));
+ FeedbackSlotKind GetKind(FeedbackSlot slot) const {
+ return static_cast<FeedbackSlotKind>(slot_kinds_.at(slot.ToInt()));
}
- int parameters_count() const { return static_cast<int>(parameters_.size()); }
-
- int GetParameter(int index) const { return parameters_.at(index); }
-
private:
friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
- void append(FeedbackVectorSlotKind kind) {
+ void append(FeedbackSlotKind kind) {
slot_kinds_.push_back(static_cast<unsigned char>(kind));
}
- void append_parameter(int parameter) { parameters_.push_back(parameter); }
-
ZoneVector<unsigned char> slot_kinds_;
- ZoneVector<int> parameters_;
};
// The shape of the FeedbackMetadata is an array with:
@@ -186,29 +223,20 @@ class FeedbackMetadata : public FixedArray {
static inline FeedbackMetadata* cast(Object* obj);
static const int kSlotsCountIndex = 0;
- static const int kParametersTableIndex = 1;
- static const int kReservedIndexCount = 2;
+ static const int kReservedIndexCount = 1;
// Returns number of feedback vector elements used by given slot kind.
- static inline int GetSlotSize(FeedbackVectorSlotKind kind);
-
- // Defines if slots of given kind require "parameter".
- static inline bool SlotRequiresParameter(FeedbackVectorSlotKind kind);
+ static inline int GetSlotSize(FeedbackSlotKind kind);
bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
- bool DiffersFrom(const FeedbackMetadata* other_metadata) const;
-
inline bool is_empty() const;
// Returns number of slots in the vector.
inline int slot_count() const;
// Returns slot kind for given slot.
- FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
-
- // Returns parameter for given index (note: this is not the slot)
- int GetParameter(int parameter_index) const;
+ FeedbackSlotKind GetKind(FeedbackSlot slot) const;
template <typename Spec>
static Handle<FeedbackMetadata> New(Isolate* isolate, const Spec* spec);
@@ -220,17 +248,17 @@ class FeedbackMetadata : public FixedArray {
DECLARE_PRINTER(FeedbackMetadata)
- static const char* Kind2String(FeedbackVectorSlotKind kind);
+ static const char* Kind2String(FeedbackSlotKind kind);
private:
- static const int kFeedbackVectorSlotKindBits = 5;
- STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
- (1 << kFeedbackVectorSlotKindBits));
+ static const int kFeedbackSlotKindBits = 5;
+ STATIC_ASSERT(static_cast<int>(FeedbackSlotKind::kKindsNumber) <
+ (1 << kFeedbackSlotKindBits));
- void SetKind(FeedbackVectorSlot slot, FeedbackVectorSlotKind kind);
+ void SetKind(FeedbackSlot slot, FeedbackSlotKind kind);
- typedef BitSetComputer<FeedbackVectorSlotKind, kFeedbackVectorSlotKindBits,
- kSmiValueSize, uint32_t>
+ typedef BitSetComputer<FeedbackSlotKind, kFeedbackSlotKindBits, kSmiValueSize,
+ uint32_t>
VectorICComputer;
DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackMetadata);
@@ -248,7 +276,7 @@ class FeedbackVector : public FixedArray {
// Casting.
static inline FeedbackVector* cast(Object* obj);
- static const int kMetadataIndex = 0;
+ static const int kSharedFunctionInfoIndex = 0;
static const int kInvocationCountIndex = 1;
static const int kReservedIndexCount = 2;
@@ -261,32 +289,52 @@ class FeedbackVector : public FixedArray {
inline int slot_count() const;
inline FeedbackMetadata* metadata() const;
+ inline SharedFunctionInfo* shared_function_info() const;
inline int invocation_count() const;
+ inline void clear_invocation_count();
// Conversion from a slot to an integer index to the underlying array.
- static int GetIndex(FeedbackVectorSlot slot) {
+ static int GetIndex(FeedbackSlot slot) {
return kReservedIndexCount + slot.ToInt();
}
- static int GetIndexFromSpec(const FeedbackVectorSpec* spec,
- FeedbackVectorSlot slot);
// Conversion from an integer index to the underlying array to a slot.
- static inline FeedbackVectorSlot ToSlot(int index);
- inline Object* Get(FeedbackVectorSlot slot) const;
- inline void Set(FeedbackVectorSlot slot, Object* value,
+ static inline FeedbackSlot ToSlot(int index);
+ inline Object* Get(FeedbackSlot slot) const;
+ inline void Set(FeedbackSlot slot, Object* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Returns slot kind for given slot.
- FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
- // Returns parameter corresponding to given slot or -1.
- int GetParameter(FeedbackVectorSlot slot) const;
+ FeedbackSlotKind GetKind(FeedbackSlot slot) const;
static Handle<FeedbackVector> New(Isolate* isolate,
- Handle<FeedbackMetadata> metadata);
+ Handle<SharedFunctionInfo> shared);
static Handle<FeedbackVector> Copy(Isolate* isolate,
Handle<FeedbackVector> vector);
+#define DEFINE_SLOT_KIND_PREDICATE(Name) \
+ bool Name(FeedbackSlot slot) const { return Name##Kind(GetKind(slot)); }
+
+ DEFINE_SLOT_KIND_PREDICATE(IsCallIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsLoadIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsLoadGlobalIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsKeyedLoadIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsStoreIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsStoreOwnIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsKeyedStoreIC)
+#undef DEFINE_SLOT_KIND_PREDICATE
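
For reference, the preprocessor expansion of the predicate macro above for its first instance (generated code, not written in the patch):

    bool IsCallIC(FeedbackSlot slot) const {
      return IsCallICKind(GetKind(slot));  // Free function defined earlier.
    }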
+
+ // Returns typeof mode encoded into kind of given slot.
+ inline TypeofMode GetTypeofMode(FeedbackSlot slot) const {
+ return GetTypeofModeFromSlotKind(GetKind(slot));
+ }
+
+ // Returns language mode encoded into kind of given slot.
+ inline LanguageMode GetLanguageMode(FeedbackSlot slot) const {
+ return GetLanguageModeFromSlotKind(GetKind(slot));
+ }
+
#ifdef OBJECT_PRINT
// For gdb debugging.
void Print();
@@ -295,11 +343,7 @@ class FeedbackVector : public FixedArray {
DECLARE_PRINTER(FeedbackVector)
// Clears the vector slots.
- void ClearSlots(SharedFunctionInfo* shared) { ClearSlotsImpl(shared, true); }
-
- void ClearSlotsAtGCTime(SharedFunctionInfo* shared) {
- ClearSlotsImpl(shared, false);
- }
+ void ClearSlots(JSFunction* host_function);
// The object that indicates an uninitialized cache.
static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
@@ -314,15 +358,9 @@ class FeedbackVector : public FixedArray {
// garbage collection (e.g., for patching the cache).
static inline Symbol* RawUninitializedSentinel(Isolate* isolate);
- static const int kDummyLoadICSlot = 0;
- static const int kDummyKeyedLoadICSlot = 2;
- static const int kDummyStoreICSlot = 4;
- static const int kDummyKeyedStoreICSlot = 6;
-
- static Handle<FeedbackVector> DummyVector(Isolate* isolate);
-
private:
- void ClearSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
+ static void AddToCodeCoverageList(Isolate* isolate,
+ Handle<FeedbackVector> vector);
DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackVector);
};
@@ -345,22 +383,22 @@ class FeedbackMetadataIterator {
public:
explicit FeedbackMetadataIterator(Handle<FeedbackMetadata> metadata)
: metadata_handle_(metadata),
- next_slot_(FeedbackVectorSlot(0)),
- slot_kind_(FeedbackVectorSlotKind::INVALID) {}
+ next_slot_(FeedbackSlot(0)),
+ slot_kind_(FeedbackSlotKind::kInvalid) {}
explicit FeedbackMetadataIterator(FeedbackMetadata* metadata)
: metadata_(metadata),
- next_slot_(FeedbackVectorSlot(0)),
- slot_kind_(FeedbackVectorSlotKind::INVALID) {}
+ next_slot_(FeedbackSlot(0)),
+ slot_kind_(FeedbackSlotKind::kInvalid) {}
inline bool HasNext() const;
- inline FeedbackVectorSlot Next();
+ inline FeedbackSlot Next();
// Returns slot kind of the last slot returned by Next().
- FeedbackVectorSlotKind kind() const {
- DCHECK_NE(FeedbackVectorSlotKind::INVALID, slot_kind_);
- DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, slot_kind_);
+ FeedbackSlotKind kind() const {
+ DCHECK_NE(FeedbackSlotKind::kInvalid, slot_kind_);
+ DCHECK_NE(FeedbackSlotKind::kKindsNumber, slot_kind_);
return slot_kind_;
}
@@ -377,18 +415,18 @@ class FeedbackMetadataIterator {
// pointer use cases.
Handle<FeedbackMetadata> metadata_handle_;
FeedbackMetadata* metadata_;
- FeedbackVectorSlot cur_slot_;
- FeedbackVectorSlot next_slot_;
- FeedbackVectorSlotKind slot_kind_;
+ FeedbackSlot cur_slot_;
+ FeedbackSlot next_slot_;
+ FeedbackSlotKind slot_kind_;
};
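
Sketch of a metadata walk with the renamed types, using only HasNext(), Next(), and kind() from the class above; assumes a FeedbackMetadata* named metadata, and the dispatch body is illustrative:

    FeedbackMetadataIterator iter(metadata);
    while (iter.HasNext()) {
      FeedbackSlot slot = iter.Next();
      switch (iter.kind()) {
        case FeedbackSlotKind::kCall:
          // One nexus class per kind, e.g. CallICNexus(vector, slot).
          break;
        default:
          break;
      }
    }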
// A FeedbackNexus is the combination of a FeedbackVector and a slot.
// Derived classes customize the update and retrieval of feedback.
class FeedbackNexus {
public:
- FeedbackNexus(Handle<FeedbackVector> vector, FeedbackVectorSlot slot)
+ FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: vector_handle_(vector), vector_(NULL), slot_(slot) {}
- FeedbackNexus(FeedbackVector* vector, FeedbackVectorSlot slot)
+ FeedbackNexus(FeedbackVector* vector, FeedbackSlot slot)
: vector_(vector), slot_(slot) {}
virtual ~FeedbackNexus() {}
@@ -399,7 +437,8 @@ class FeedbackNexus {
FeedbackVector* vector() const {
return vector_handle_.is_null() ? vector_ : *vector_handle_;
}
- FeedbackVectorSlot slot() const { return slot_; }
+ FeedbackSlot slot() const { return slot_; }
+ FeedbackSlotKind kind() const { return vector()->GetKind(slot()); }
InlineCacheState ic_state() const { return StateFromFeedback(); }
bool IsUninitialized() const { return StateFromFeedback() == UNINITIALIZED; }
@@ -420,6 +459,12 @@ class FeedbackNexus {
int length = -1) const;
virtual Name* FindFirstName() const { return NULL; }
+ bool IsCleared() {
+ InlineCacheState state = StateFromFeedback();
+ return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
+ }
+
+ virtual void Clear() { ConfigureUninitialized(); }
virtual void ConfigureUninitialized();
virtual void ConfigurePremonomorphic();
virtual void ConfigureMegamorphic();
@@ -447,22 +492,20 @@ class FeedbackNexus {
// be done, like allocation.
Handle<FeedbackVector> vector_handle_;
FeedbackVector* vector_;
- FeedbackVectorSlot slot_;
+ FeedbackSlot slot_;
};
class CallICNexus final : public FeedbackNexus {
public:
- CallICNexus(Handle<FeedbackVector> vector, FeedbackVectorSlot slot)
+ CallICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
+ DCHECK(vector->IsCallIC(slot));
}
- CallICNexus(FeedbackVector* vector, FeedbackVectorSlot slot)
+ CallICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
+ DCHECK(vector->IsCallIC(slot));
}
- void Clear(Code* host);
-
void ConfigureUninitialized() override;
void ConfigureMonomorphicArray();
void ConfigureMonomorphic(Handle<JSFunction> function);
@@ -492,20 +535,16 @@ class CallICNexus final : public FeedbackNexus {
class LoadICNexus : public FeedbackNexus {
public:
- LoadICNexus(Handle<FeedbackVector> vector, FeedbackVectorSlot slot)
+ LoadICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, vector->GetKind(slot));
+ DCHECK(vector->IsLoadIC(slot));
}
- explicit LoadICNexus(Isolate* isolate)
- : FeedbackNexus(
- FeedbackVector::DummyVector(isolate),
- FeedbackVectorSlot(FeedbackVector::kDummyLoadICSlot)) {}
- LoadICNexus(FeedbackVector* vector, FeedbackVectorSlot slot)
+ LoadICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, vector->GetKind(slot));
+ DCHECK(vector->IsLoadIC(slot));
}
- void Clear(Code* host);
+ void Clear() override { ConfigurePremonomorphic(); }
void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Object> handler);
@@ -517,13 +556,13 @@ class LoadICNexus : public FeedbackNexus {
class LoadGlobalICNexus : public FeedbackNexus {
public:
- LoadGlobalICNexus(Handle<FeedbackVector> vector, FeedbackVectorSlot slot)
+ LoadGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC, vector->GetKind(slot));
+ DCHECK(vector->IsLoadGlobalIC(slot));
}
- LoadGlobalICNexus(FeedbackVector* vector, FeedbackVectorSlot slot)
+ LoadGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC, vector->GetKind(slot));
+ DCHECK(vector->IsLoadGlobalIC(slot));
}
int ExtractMaps(MapHandleList* maps) const final {
@@ -539,7 +578,6 @@ class LoadGlobalICNexus : public FeedbackNexus {
}
void ConfigureMegamorphic() override { UNREACHABLE(); }
- void Clear(Code* host);
void ConfigureUninitialized() override;
void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
@@ -550,20 +588,16 @@ class LoadGlobalICNexus : public FeedbackNexus {
class KeyedLoadICNexus : public FeedbackNexus {
public:
- KeyedLoadICNexus(Handle<FeedbackVector> vector, FeedbackVectorSlot slot)
+ KeyedLoadICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
+ DCHECK(vector->IsKeyedLoadIC(slot));
}
- explicit KeyedLoadICNexus(Isolate* isolate)
- : FeedbackNexus(
- FeedbackVector::DummyVector(isolate),
- FeedbackVectorSlot(FeedbackVector::kDummyKeyedLoadICSlot)) {}
- KeyedLoadICNexus(FeedbackVector* vector, FeedbackVectorSlot slot)
+ KeyedLoadICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
+ DCHECK(vector->IsKeyedLoadIC(slot));
}
- void Clear(Code* host);
+ void Clear() override { ConfigurePremonomorphic(); }
// name can be a null handle for element loads.
void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
@@ -581,20 +615,16 @@ class KeyedLoadICNexus : public FeedbackNexus {
class StoreICNexus : public FeedbackNexus {
public:
- StoreICNexus(Handle<FeedbackVector> vector, FeedbackVectorSlot slot)
+ StoreICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
+ DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
}
- explicit StoreICNexus(Isolate* isolate)
- : FeedbackNexus(
- FeedbackVector::DummyVector(isolate),
- FeedbackVectorSlot(FeedbackVector::kDummyStoreICSlot)) {}
- StoreICNexus(FeedbackVector* vector, FeedbackVectorSlot slot)
+ StoreICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
+ DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
}
- void Clear(Code* host);
+ void Clear() override { ConfigurePremonomorphic(); }
void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Object> handler);
@@ -604,22 +634,22 @@ class StoreICNexus : public FeedbackNexus {
InlineCacheState StateFromFeedback() const override;
};
+// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
+// already exist in the boilerplate, so we can use StoreIC.
+typedef StoreICNexus StoreOwnICNexus;
+
class KeyedStoreICNexus : public FeedbackNexus {
public:
- KeyedStoreICNexus(Handle<FeedbackVector> vector, FeedbackVectorSlot slot)
+ KeyedStoreICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, vector->GetKind(slot));
+ DCHECK(vector->IsKeyedStoreIC(slot));
}
- explicit KeyedStoreICNexus(Isolate* isolate)
- : FeedbackNexus(
- FeedbackVector::DummyVector(isolate),
- FeedbackVectorSlot(FeedbackVector::kDummyKeyedStoreICSlot)) {}
- KeyedStoreICNexus(FeedbackVector* vector, FeedbackVectorSlot slot)
+ KeyedStoreICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, vector->GetKind(slot));
+ DCHECK(vector->IsKeyedStoreIC(slot));
}
- void Clear(Code* host);
+ void Clear() override { ConfigurePremonomorphic(); }
  // name can be a null handle for element stores.
void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
@@ -641,19 +671,15 @@ class KeyedStoreICNexus : public FeedbackNexus {
class BinaryOpICNexus final : public FeedbackNexus {
public:
- BinaryOpICNexus(Handle<FeedbackVector> vector, FeedbackVectorSlot slot)
+ BinaryOpICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
- vector->GetKind(slot));
+ DCHECK_EQ(FeedbackSlotKind::kBinaryOp, vector->GetKind(slot));
}
- BinaryOpICNexus(FeedbackVector* vector, FeedbackVectorSlot slot)
+ BinaryOpICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
- vector->GetKind(slot));
+ DCHECK_EQ(FeedbackSlotKind::kBinaryOp, vector->GetKind(slot));
}
- void Clear(Code* host);
-
InlineCacheState StateFromFeedback() const final;
BinaryOperationHint GetBinaryOperationFeedback() const;
@@ -672,19 +698,15 @@ class BinaryOpICNexus final : public FeedbackNexus {
class CompareICNexus final : public FeedbackNexus {
public:
- CompareICNexus(Handle<FeedbackVector> vector, FeedbackVectorSlot slot)
+ CompareICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
- vector->GetKind(slot));
+ DCHECK_EQ(FeedbackSlotKind::kCompareOp, vector->GetKind(slot));
}
- CompareICNexus(FeedbackVector* vector, FeedbackVectorSlot slot)
+ CompareICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
- vector->GetKind(slot));
+ DCHECK_EQ(FeedbackSlotKind::kCompareOp, vector->GetKind(slot));
}
- void Clear(Code* host);
-
InlineCacheState StateFromFeedback() const final;
CompareOperationHint GetCompareOperationFeedback() const;
@@ -704,20 +726,17 @@ class CompareICNexus final : public FeedbackNexus {
class StoreDataPropertyInLiteralICNexus : public FeedbackNexus {
public:
StoreDataPropertyInLiteralICNexus(Handle<FeedbackVector> vector,
- FeedbackVectorSlot slot)
+ FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC,
+ DCHECK_EQ(FeedbackSlotKind::kStoreDataPropertyInLiteral,
vector->GetKind(slot));
}
- StoreDataPropertyInLiteralICNexus(FeedbackVector* vector,
- FeedbackVectorSlot slot)
+ StoreDataPropertyInLiteralICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC,
+ DCHECK_EQ(FeedbackSlotKind::kStoreDataPropertyInLiteral,
vector->GetKind(slot));
}
- void Clear(Code* host) { ConfigureUninitialized(); }
-
void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map);
InlineCacheState StateFromFeedback() const override;
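
With the per-IC Clear(Code* host) entry points gone, clearing follows one shape everywhere: the base FeedbackNexus::Clear() resets to uninitialized, and the load/store nexuses override it to reset to premonomorphic instead. A hypothetical caller, mirroring the pattern in FeedbackVector::ClearSlots above (vector and slot assumed in scope):

    LoadICNexus nexus(vector, slot);
    if (!nexus.IsCleared()) {
      nexus.Clear();  // Overridden: ConfigurePremonomorphic(), not uninitialized.
    }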
diff --git a/deps/v8/src/ffi/OWNERS b/deps/v8/src/ffi/OWNERS
new file mode 100644
index 0000000000..dc9a9780a6
--- /dev/null
+++ b/deps/v8/src/ffi/OWNERS
@@ -0,0 +1,2 @@
+mattloring@google.com
+ofrobots@google.com
diff --git a/deps/v8/src/ffi/ffi-compiler.cc b/deps/v8/src/ffi/ffi-compiler.cc
new file mode 100644
index 0000000000..d7fdbb957d
--- /dev/null
+++ b/deps/v8/src/ffi/ffi-compiler.cc
@@ -0,0 +1,128 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ffi/ffi-compiler.h"
+#include "src/api.h"
+#include "src/code-factory.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void InstallFFIMap(Isolate* isolate) {
+ Handle<Context> context(isolate->context());
+ DCHECK(!context->get(Context::NATIVE_FUNCTION_MAP_INDEX)->IsMap());
+ Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
+
+ InstanceType instance_type = prev_map->instance_type();
+ int internal_fields = JSObject::GetInternalFieldCount(*prev_map);
+ CHECK_EQ(0, internal_fields);
+ int pre_allocated =
+ prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
+ int instance_size;
+ int in_object_properties;
+ JSFunction::CalculateInstanceSizeHelper(
+ instance_type, internal_fields, 0, &instance_size, &in_object_properties);
+ int unused_property_fields = in_object_properties - pre_allocated;
+ Handle<Map> map = Map::CopyInitialMap(
+ prev_map, instance_size, in_object_properties, unused_property_fields);
+ context->set_native_function_map(*map);
+}
+
+namespace ffi {
+
+class FFIAssembler : public CodeStubAssembler {
+ public:
+ explicit FFIAssembler(CodeAssemblerState* state) : CodeStubAssembler(state) {}
+
+ Node* ToJS(Node* node, Node* context, FFIType type) {
+ switch (type) {
+ case FFIType::kInt32:
+ return ChangeInt32ToTagged(node);
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
+
+ Node* FromJS(Node* node, Node* context, FFIType type) {
+ switch (type) {
+ case FFIType::kInt32:
+ return TruncateTaggedToWord32(context, node);
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
+
+ MachineType FFIToMachineType(FFIType type) {
+ switch (type) {
+ case FFIType::kInt32:
+ return MachineType::Int32();
+ }
+ UNREACHABLE();
+ return MachineType::None();
+ }
+
+ Signature<MachineType>* FFIToMachineSignature(FFISignature* sig) {
+ Signature<MachineType>::Builder sig_builder(zone(), sig->return_count(),
+ sig->parameter_count());
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ sig_builder.AddReturn(FFIToMachineType(sig->GetReturn(i)));
+ }
+ for (size_t j = 0; j < sig->parameter_count(); j++) {
+ sig_builder.AddParam(FFIToMachineType(sig->GetParam(j)));
+ }
+ return sig_builder.Build();
+ }
+
+ void GenerateJSToNativeWrapper(NativeFunction* func) {
+ int params = static_cast<int>(func->sig->parameter_count());
+ int returns = static_cast<int>(func->sig->return_count());
+ ApiFunction api_func(func->start);
+ ExternalReference ref(&api_func, ExternalReference::BUILTIN_CALL,
+ isolate());
+
+ Node* context_param = GetJSContextParameter();
+
+ Node** inputs = zone()->NewArray<Node*>(params + 1);
+ int input_count = 0;
+ inputs[input_count++] = ExternalConstant(ref);
+ for (int i = 0; i < params; i++) {
+ inputs[input_count++] =
+ FromJS(Parameter(i), context_param, func->sig->GetParam(i));
+ }
+
+ Node* call =
+ CallCFunctionN(FFIToMachineSignature(func->sig), input_count, inputs);
+ Node* return_val = UndefinedConstant();
+ if (returns == 1) {
+ return_val = ToJS(call, context_param, func->sig->GetReturn());
+ }
+ Return(return_val);
+ }
+};
+
+Handle<JSFunction> CompileJSToNativeWrapper(Isolate* isolate,
+ Handle<String> name,
+ NativeFunction func) {
+ int params = static_cast<int>(func.sig->parameter_count());
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ CodeAssemblerState state(isolate, &zone, params,
+ Code::ComputeFlags(Code::BUILTIN), "js-to-native");
+ FFIAssembler assembler(&state);
+ assembler.GenerateJSToNativeWrapper(&func);
+ Handle<Code> code = assembler.GenerateCode(&state);
+
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfo(name, code, false);
+ shared->set_length(params);
+ shared->set_internal_formal_parameter_count(params);
+ Handle<JSFunction> function = isolate->factory()->NewFunction(
+ isolate->native_function_map(), name, code);
+ function->set_shared(*shared);
+ return function;
+}
+
+} // namespace ffi
+} // namespace internal
+} // namespace v8
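
A hypothetical call site for the new API; the native function NativeAdd and its (int32, int32) -> int32 signature are assumptions for illustration, not part of this patch:

    // Wrap a C function int32_t NativeAdd(int32_t, int32_t) for JS callers.
    Zone zone(isolate->allocator(), ZONE_NAME);
    ffi::FFISignature::Builder sig_builder(&zone, 1, 2);
    sig_builder.AddReturn(ffi::FFIType::kInt32);
    sig_builder.AddParam(ffi::FFIType::kInt32);
    sig_builder.AddParam(ffi::FFIType::kInt32);
    ffi::NativeFunction func = {sig_builder.Build(),
                                reinterpret_cast<uint8_t*>(&NativeAdd)};
    Handle<JSFunction> wrapper = ffi::CompileJSToNativeWrapper(
        isolate, isolate->factory()->NewStringFromAsciiChecked("add"), func);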
diff --git a/deps/v8/src/ffi/ffi-compiler.h b/deps/v8/src/ffi/ffi-compiler.h
new file mode 100644
index 0000000000..2825f4f0af
--- /dev/null
+++ b/deps/v8/src/ffi/ffi-compiler.h
@@ -0,0 +1,37 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_FFI_FFI_COMPILER_H_
+#define SRC_FFI_FFI_COMPILER_H_
+
+#include "src/code-stub-assembler.h"
+#include "src/machine-type.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+void InstallFFIMap(Isolate* isolate);
+
+namespace ffi {
+
+enum class FFIType : uint8_t { kInt32 };
+
+typedef Signature<FFIType> FFISignature;
+
+struct NativeFunction {
+ FFISignature* sig;
+ uint8_t* start;
+};
+
+Handle<JSFunction> CompileJSToNativeWrapper(Isolate* isolate,
+ Handle<String> name,
+ NativeFunction func);
+} // namespace ffi
+} // namespace internal
+} // namespace v8
+
+#endif // SRC_FFI_FFI_COMPILER_H_
diff --git a/deps/v8/src/field-type.h b/deps/v8/src/field-type.h
index 11e1069c83..2f8250a161 100644
--- a/deps/v8/src/field-type.h
+++ b/deps/v8/src/field-type.h
@@ -6,13 +6,15 @@
#define V8_FIELD_TYPE_H_
#include "src/ast/ast-types.h"
-#include "src/handles.h"
#include "src/objects.h"
#include "src/ostreams.h"
namespace v8 {
namespace internal {
+template <typename T>
+class Handle;
+
class FieldType : public Object {
public:
static FieldType* None();
diff --git a/deps/v8/src/find-and-replace-pattern.h b/deps/v8/src/find-and-replace-pattern.h
new file mode 100644
index 0000000000..845ee0f782
--- /dev/null
+++ b/deps/v8/src/find-and-replace-pattern.h
@@ -0,0 +1,37 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FIND_AND_REPLACE_PATTERN_H_
+#define V8_FIND_AND_REPLACE_PATTERN_H_
+
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class Map;
+class Object;
+
+class FindAndReplacePattern {
+ public:
+ FindAndReplacePattern() : count_(0) {}
+ void Add(Handle<Map> map_to_find, Handle<Object> obj_to_replace) {
+ DCHECK(count_ < kMaxCount);
+ find_[count_] = map_to_find;
+ replace_[count_] = obj_to_replace;
+ ++count_;
+ }
+
+ private:
+ static const int kMaxCount = 4;
+ int count_;
+ Handle<Map> find_[kMaxCount];
+ Handle<Object> replace_[kMaxCount];
+ friend class Code;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_FIND_AND_REPLACE_PATTERN_H_
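
The pattern is a fixed-capacity list (kMaxCount == 4) of map-to-object substitutions that only class Code, its sole friend, can read back out. A hypothetical build-up, with the two handles standing in for any map/replacement pair:

    FindAndReplacePattern pattern;
    pattern.Add(map_to_find, replacement);  // Hypothetical Handle<Map>/Handle<Object>.
    // Up to four Add() calls; a fifth would trip the DCHECK on count_.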
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 39534f9301..e6e9b7eb7a 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -159,6 +159,7 @@ struct MaybeBoolFlag {
#define DEFINE_MAYBE_BOOL(nam, cmt) \
FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, {false COMMA false}, cmt)
#define DEFINE_INT(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
+#define DEFINE_UINT(nam, def, cmt) FLAG(UINT, unsigned int, nam, def, cmt)
#define DEFINE_FLOAT(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_STRING(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
#define DEFINE_ARGS(nam, cmt) FLAG(ARGS, JSArguments, nam, {0 COMMA NULL}, cmt)
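
The new DEFINE_UINT macro is exercised later in this same diff by the wasm limits, for example:

    DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
                "maximum memory size of a wasm instance")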
@@ -196,38 +197,36 @@ DEFINE_IMPLICATION(es_staging, move_object_start)
#define HARMONY_INPROGRESS(V) \
V(harmony_array_prototype_values, "harmony Array.prototype.values") \
V(harmony_function_sent, "harmony function.sent") \
+ V(harmony_tailcalls, "harmony tail calls") \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_simd, "harmony simd") \
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_regexp_named_captures, "harmony regexp named captures") \
V(harmony_regexp_property, "harmony unicode regexp property classes") \
+ V(harmony_function_tostring, "harmony Function.prototype.toString") \
V(harmony_class_fields, "harmony public fields in class literals") \
- V(harmony_object_spread, "harmony object spread")
+ V(harmony_async_iteration, "harmony async iteration") \
+ V(harmony_dynamic_import, "harmony dynamic import") \
+ V(harmony_promise_finally, "harmony Promise.prototype.finally")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V) \
- V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
- V(harmony_restrictive_generators, \
- "harmony restrictions on generator declarations") \
- V(harmony_tailcalls, "harmony tail calls") \
- V(harmony_trailing_commas, \
- "harmony trailing commas in function parameter lists")
-
-#ifdef V8_I18N_SUPPORT
-#define HARMONY_STAGED(V) \
- HARMONY_STAGED_BASE(V) \
- V(icu_case_mapping, "case mapping with ICU rather than Unibrow")
-#else
-#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
-#endif
+#define HARMONY_STAGED(V) \
+ V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
+ V(harmony_restrictive_generators, \
+ "harmony restrictions on generator declarations") \
+ V(harmony_object_rest_spread, "harmony object rest spread properties") \
+ V(harmony_template_escapes, \
+ "harmony invalid escapes in tagged template literals")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V)
+#define HARMONY_SHIPPING_BASE(V) \
+ V(harmony_trailing_commas, \
+ "harmony trailing commas in function parameter lists")
#ifdef V8_I18N_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(datetime_format_to_parts, "Intl.DateTimeFormat.formatToParts")
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(datetime_format_to_parts, "Intl.DateTimeFormat.formatToParts") \
+ V(icu_case_mapping, "case mapping with ICU rather than Unibrow")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -258,14 +257,27 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
DEFINE_BOOL(future, false,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_IMPLICATION(future, ignition_staging)
+DEFINE_IMPLICATION(future, turbo)
+
+DEFINE_IMPLICATION(turbo, ignition_staging)
+DEFINE_IMPLICATION(turbo, enable_fast_array_builtins)
+DEFINE_IMPLICATION(turbo, thin_strings)
+
+// TODO(rmcilroy): Remove ignition-staging and set these implications directly
+// with the turbo flag.
+DEFINE_BOOL(ignition_staging, false, "use ignition with all staged features")
+DEFINE_IMPLICATION(ignition_staging, ignition)
+DEFINE_IMPLICATION(ignition_staging, compiler_dispatcher)
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
+DEFINE_BOOL(mark_shared_functions_for_tier_up, true,
+ "mark shared functions for tier up")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
DEFINE_INT(page_promotion_threshold, 70,
"min percentage of live bytes on a page to enable fast evacuation")
+DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations")
DEFINE_BOOL(trace_pretenuring, false,
"trace pretenuring decisions of HAllocate instructions")
DEFINE_BOOL(trace_pretenuring_statistics, false,
@@ -280,12 +292,7 @@ DEFINE_IMPLICATION(track_computed_fields, track_fields)
DEFINE_BOOL(track_field_types, true, "track field types")
DEFINE_IMPLICATION(track_field_types, track_fields)
DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
-DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations")
-DEFINE_BOOL(mark_shared_functions_for_tier_up, false,
- "mark shared functions for tier up")
-
-// Flags for strongly rooting literal arrays in the feedback vector.
-DEFINE_BOOL(trace_strong_rooted_literals, false, "trace literal rooting")
+DEFINE_BOOL(type_profile, false, "collect type information")
// Flags for optimization types.
DEFINE_BOOL(optimize_for_size, false,
@@ -300,10 +307,6 @@ DEFINE_BOOL(string_slices, true, "use string slices")
// Flags for Ignition.
DEFINE_BOOL(ignition, false, "use ignition interpreter")
-DEFINE_BOOL(ignition_staging, false, "use ignition with all staged features")
-DEFINE_IMPLICATION(ignition_staging, ignition)
-DEFINE_IMPLICATION(ignition_staging, compiler_dispatcher)
-DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
DEFINE_BOOL(ignition_deadcode, true,
"use ignition dead code elimination optimizer")
DEFINE_BOOL(ignition_osr, true, "enable support for OSR from ignition code")
@@ -313,6 +316,8 @@ DEFINE_BOOL(ignition_filter_expression_positions, true,
"filter expression positions before the bytecode pipeline")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
+DEFINE_STRING(print_bytecode_filter, "*",
+ "filter for selecting which functions to print bytecode")
DEFINE_BOOL(trace_ignition, false,
"trace the bytecodes executed by the ignition interpreter")
DEFINE_BOOL(trace_ignition_codegen, false,
@@ -434,7 +439,6 @@ DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_preprocess_ranges, true,
"run pre-register allocation heuristics")
-DEFINE_BOOL(turbo_loop_stackcheck, true, "enable stack checks in loops")
DEFINE_STRING(turbo_filter, "~~", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
DEFINE_BOOL(trace_turbo_graph, false, "trace generated TurboFan graphs")
@@ -452,9 +456,16 @@ DEFINE_BOOL(turbo_asm, true, "enable TurboFan for asm.js code")
DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
DEFINE_STRING(turbo_verify_machine_graph, nullptr,
"verify TurboFan machine graph before instruction selection")
-DEFINE_BOOL(csa_verify, DEBUG_BOOL,
+#ifdef ENABLE_VERIFY_CSA
+DEFINE_BOOL(verify_csa, DEBUG_BOOL,
"verify TurboFan machine graph of code stubs")
-DEFINE_BOOL(trace_csa_verify, false, "trace code stubs verification")
+#else
+// Define the flag as read-only-false so that code still compiles even in the
+// non-ENABLE_VERIFY_CSA configuration.
+DEFINE_BOOL_READONLY(verify_csa, false,
+ "verify TurboFan machine graph of code stubs")
+#endif
+DEFINE_BOOL(trace_verify_csa, false, "trace code stubs verification")
DEFINE_STRING(csa_trap_on_node, nullptr,
"trigger break point when a node with given id is created in "
"given stub. The format is: StubName,NodeId")
@@ -486,8 +497,8 @@ DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
"randomly schedule instructions to stress dependency tracking")
DEFINE_BOOL(turbo_store_elimination, true,
"enable store-store elimination in TurboFan")
-DEFINE_BOOL(turbo_lower_create_closure, false,
- "enable inline allocation for closure instantiation")
+// TODO(turbofan): Rename --crankshaft to --optimize eventually.
+DEFINE_IMPLICATION(turbo, crankshaft)
// Flags to help platform porters
DEFINE_BOOL(minimal, false,
@@ -499,10 +510,16 @@ DEFINE_NEG_IMPLICATION(minimal, use_ic)
// Flags for native WebAssembly.
DEFINE_BOOL(expose_wasm, true, "expose WASM interface to JavaScript")
+DEFINE_BOOL(assume_asmjs_origin, false,
+ "force wasm decoder to assume input is internal asm-wasm format")
DEFINE_BOOL(wasm_disable_structured_cloning, false,
"disable WASM structured cloning")
DEFINE_INT(wasm_num_compilation_tasks, 10,
"number of parallel compilation tasks for wasm")
+DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
+ "maximum memory size of a wasm instance")
+DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
+ "maximum table size of a wasm instance")
DEFINE_BOOL(trace_wasm_encoder, false, "trace encoding of wasm code")
DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
@@ -522,7 +539,6 @@ DEFINE_BOOL(wasm_loop_assignment_analysis, true,
"perform loop assignment analysis for WASM")
DEFINE_BOOL(validate_asm, false, "validate asm.js modules before compiling")
-DEFINE_IMPLICATION(ignition_staging, validate_asm)
DEFINE_BOOL(suppress_asm_messages, false,
"don't emit asm.js related messages (for golden file testing)")
DEFINE_BOOL(trace_asm_time, false, "log asm.js timing info to the console")
@@ -555,10 +571,12 @@ DEFINE_BOOL(wasm_guard_pages, false,
"add guard pages to the end of WebWassembly memory"
" (experimental, no effect on 32-bit)")
DEFINE_IMPLICATION(wasm_trap_handler, wasm_guard_pages)
-DEFINE_BOOL(wasm_trap_if, false,
+DEFINE_BOOL(wasm_trap_if, true,
"enable the use of the trap_if operator for traps")
DEFINE_BOOL(wasm_code_fuzzer_gen_test, false,
"Generate a test case when running the wasm-code fuzzer")
+DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
+
// Profiler flags.
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
// 0x1800 fits in the immediate field of an ARM instruction.
@@ -573,6 +591,112 @@ DEFINE_INT(self_opt_count, 130, "call count before self-optimization")
DEFINE_BOOL(trace_opt_verbose, false, "extra verbose compilation tracing")
DEFINE_IMPLICATION(trace_opt_verbose, trace_opt)
+// Garbage collections flags.
+DEFINE_INT(min_semi_space_size, 0,
+ "min size of a semi-space (in MBytes), the new space consists of two"
+ "semi-spaces")
+DEFINE_INT(max_semi_space_size, 0,
+ "max size of a semi-space (in MBytes), the new space consists of two"
+ "semi-spaces")
+DEFINE_INT(semi_space_growth_factor, 2, "factor by which to grow the new space")
+DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
+ "Grow the new space based on the percentage of survivors instead "
+ "of their absolute value.")
+DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
+DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
+DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)")
+DEFINE_BOOL(gc_global, false, "always perform global GCs")
+DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
+DEFINE_INT(retain_maps_for_n_gc, 2,
+ "keeps maps alive for <n> old space garbage collections")
+DEFINE_BOOL(trace_gc, false,
+ "print one trace line following each garbage collection")
+DEFINE_BOOL(trace_gc_nvp, false,
+ "print one detailed trace line in name=value format "
+ "after each garbage collection")
+DEFINE_BOOL(trace_gc_ignore_scavenger, false,
+ "do not print trace line after scavenger collection")
+DEFINE_BOOL(trace_idle_notification, false,
+ "print one trace line following each idle notification")
+DEFINE_BOOL(trace_idle_notification_verbose, false,
+ "prints the heap state used by the idle notification")
+DEFINE_BOOL(trace_gc_verbose, false,
+ "print more details following each garbage collection")
+DEFINE_INT(trace_allocation_stack_interval, -1,
+ "print stack trace after <n> free-list allocations")
+DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
+DEFINE_BOOL(trace_fragmentation_verbose, false,
+ "report fragmentation for old space (detailed)")
+DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
+DEFINE_BOOL(trace_mutator_utilization, false,
+ "print mutator utilization, allocation speed, gc speed")
+DEFINE_BOOL(flush_code, true, "flush code that we expect not to use again")
+DEFINE_BOOL(trace_code_flushing, false, "trace code flushing progress")
+DEFINE_BOOL(age_code, true,
+ "track un-executed functions to age code and flush only "
+ "old code (required for code flushing)")
+DEFINE_BOOL(incremental_marking, true, "use incremental marking")
+DEFINE_BOOL(incremental_marking_wrappers, true,
+ "use incremental marking for marking wrappers")
+DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
+ "keep finalizing incremental marking as long as we discover at "
+ "least this many unmarked objects")
+DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
+ "at most try this many times to finalize incremental marking")
+DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
+DEFINE_NEG_IMPLICATION(minor_mc, incremental_marking)
+DEFINE_BOOL(black_allocation, true, "use black allocation")
+DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
+DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
+DEFINE_BOOL(parallel_pointer_update, true,
+ "use parallel pointer update during compaction")
+DEFINE_BOOL(trace_incremental_marking, false,
+ "trace progress of the incremental marking")
+DEFINE_BOOL(track_gc_object_stats, false,
+ "track object counts and memory usage")
+DEFINE_BOOL(trace_gc_object_stats, false,
+ "trace object counts and memory usage")
+DEFINE_INT(gc_stats, 0, "Used by tracing internally to enable gc statistics")
+DEFINE_IMPLICATION(trace_gc_object_stats, track_gc_object_stats)
+DEFINE_VALUE_IMPLICATION(track_gc_object_stats, gc_stats, 1)
+DEFINE_VALUE_IMPLICATION(trace_gc_object_stats, gc_stats, 1)
+DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
+DEFINE_BOOL(track_detached_contexts, true,
+ "track native contexts that are expected to be garbage collected")
+DEFINE_BOOL(trace_detached_contexts, false,
+ "trace native contexts that are expected to be garbage collected")
+DEFINE_IMPLICATION(trace_detached_contexts, track_detached_contexts)
+#ifdef VERIFY_HEAP
+DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
+#endif
+DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
+DEFINE_BOOL(memory_reducer, true, "use memory reducer")
+DEFINE_INT(heap_growing_percent, 0,
+ "specifies heap growing factor as (1 + heap_growing_percent/100)")
+DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
+DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
+DEFINE_BOOL(never_compact, false,
+ "Never perform compaction on full GC - testing only")
+DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
+DEFINE_BOOL(cleanup_code_caches_at_gc, true,
+ "Flush code caches in maps during mark compact cycle.")
+DEFINE_BOOL(use_marking_progress_bar, true,
+ "Use a progress bar to scan large objects in increments when "
+ "incremental marking is active.")
+DEFINE_BOOL(zap_code_space, DEBUG_BOOL,
+ "Zap free memory in code space with 0xCC while sweeping.")
+DEFINE_BOOL(force_marking_deque_overflows, false,
+ "force overflows of marking deque by reducing it's size "
+ "to 64 words")
+DEFINE_BOOL(stress_compaction, false,
+ "stress the GC compactor to flush out bugs (implies "
+ "--force_marking_deque_overflows)")
+DEFINE_BOOL(manual_evacuation_candidates_selection, false,
+ "Test mode only flag. It allows an unit test to select evacuation "
+ "candidates pages (requires --stress_compaction).")
+DEFINE_BOOL(fast_promotion_new_space, false,
+ "fast promote new space on high survival rates")
+
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_BOOL(debug_code, DEBUG_BOOL,
"generate extra code (assertions) for debugging")
@@ -613,10 +737,12 @@ DEFINE_BOOL(enable_regexp_unaligned_accesses, true,
// api.cc
DEFINE_BOOL(script_streaming, true, "enable parsing on background")
+DEFINE_BOOL(disable_old_api_accessors, false,
+ "Disable old-style API accessors whose setters trigger through the "
+ "prototype chain")
// bootstrapper.cc
DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
-DEFINE_STRING(expose_debug_as, NULL, "expose debug in global object")
DEFINE_BOOL(expose_free_buffer, false, "expose freeBuffer extension")
DEFINE_BOOL(expose_gc, false, "expose gc extension")
DEFINE_STRING(expose_gc_as, NULL,
@@ -630,6 +756,7 @@ DEFINE_BOOL(builtins_in_stack_traces, false,
"show built-in functions in stack traces")
// builtins.cc
+DEFINE_BOOL(enable_fast_array_builtins, false, "use optimized builtins")
DEFINE_BOOL(allow_unsafe_function_constructor, false,
"allow invoking the function constructor without security checks")
@@ -677,6 +804,8 @@ DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions")
// compiler-dispatcher.cc
DEFINE_BOOL(compiler_dispatcher, false, "enable compiler dispatcher")
+DEFINE_BOOL(compiler_dispatcher_eager_inner, false,
+ "enable background compilation of eager inner functions")
DEFINE_BOOL(trace_compiler_dispatcher, false,
"trace compiler dispatcher activity")
@@ -702,8 +831,6 @@ DEFINE_IMPLICATION(trace_array_abuse, trace_external_array_abuse)
// debugger
DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature")
-DEFINE_BOOL(side_effect_free_debug_evaluate, false,
- "use side-effect-free debug-evaluate for testing")
DEFINE_BOOL(
trace_side_effect_free_debug_evaluate, false,
"print debug messages for side-effect-free debug-evaluate for testing")
@@ -724,101 +851,6 @@ DEFINE_BOOL(verify_operand_stack_depth, false,
"emit debug code that verifies the static tracking of the operand "
"stack depth")
-// heap.cc
-DEFINE_INT(min_semi_space_size, 0,
- "min size of a semi-space (in MBytes), the new space consists of two"
- "semi-spaces")
-DEFINE_INT(max_semi_space_size, 0,
- "max size of a semi-space (in MBytes), the new space consists of two"
- "semi-spaces")
-DEFINE_INT(semi_space_growth_factor, 2, "factor by which to grow the new space")
-DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
- "Grow the new space based on the percentage of survivors instead "
- "of their absolute value.")
-DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
-DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
-DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)")
-DEFINE_BOOL(gc_global, false, "always perform global GCs")
-DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
-DEFINE_INT(retain_maps_for_n_gc, 2,
- "keeps maps alive for <n> old space garbage collections")
-DEFINE_BOOL(trace_gc, false,
- "print one trace line following each garbage collection")
-DEFINE_BOOL(trace_gc_nvp, false,
- "print one detailed trace line in name=value format "
- "after each garbage collection")
-DEFINE_BOOL(trace_gc_ignore_scavenger, false,
- "do not print trace line after scavenger collection")
-DEFINE_BOOL(trace_idle_notification, false,
- "print one trace line following each idle notification")
-DEFINE_BOOL(trace_idle_notification_verbose, false,
- "prints the heap state used by the idle notification")
-DEFINE_BOOL(print_max_heap_committed, false,
- "print statistics of the maximum memory committed for the heap "
- "in name=value format on exit")
-DEFINE_BOOL(trace_gc_verbose, false,
- "print more details following each garbage collection")
-DEFINE_INT(trace_allocation_stack_interval, -1,
- "print stack trace after <n> free-list allocations")
-DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
-DEFINE_BOOL(trace_fragmentation_verbose, false,
- "report fragmentation for old space (detailed)")
-DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
-DEFINE_BOOL(trace_mutator_utilization, false,
- "print mutator utilization, allocation speed, gc speed")
-DEFINE_BOOL(weak_embedded_maps_in_optimized_code, true,
- "make maps embedded in optimized code weak")
-DEFINE_BOOL(weak_embedded_objects_in_optimized_code, true,
- "make objects embedded in optimized code weak")
-DEFINE_BOOL(flush_code, true, "flush code that we expect not to use again")
-DEFINE_BOOL(trace_code_flushing, false, "trace code flushing progress")
-DEFINE_BOOL(age_code, true,
- "track un-executed functions to age code and flush only "
- "old code (required for code flushing)")
-DEFINE_BOOL(incremental_marking, true, "use incremental marking")
-DEFINE_BOOL(incremental_marking_wrappers, true,
- "use incremental marking for marking wrappers")
-DEFINE_BOOL(object_grouping_in_incremental_finalization, true,
- "enable object grouping in incremental finalization")
-DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
- "keep finalizing incremental marking as long as we discover at "
- "least this many unmarked objects")
-DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
- "at most try this many times to finalize incremental marking")
-DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
-DEFINE_NEG_IMPLICATION(minor_mc, incremental_marking)
-DEFINE_BOOL(black_allocation, true, "use black allocation")
-DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
-DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
-DEFINE_BOOL(parallel_pointer_update, true,
- "use parallel pointer update during compaction")
-DEFINE_BOOL(trace_incremental_marking, false,
- "trace progress of the incremental marking")
-DEFINE_BOOL(track_gc_object_stats, false,
- "track object counts and memory usage")
-DEFINE_BOOL(trace_gc_object_stats, false,
- "trace object counts and memory usage")
-DEFINE_INT(gc_stats, 0, "Used by tracing internally to enable gc statistics")
-DEFINE_IMPLICATION(trace_gc_object_stats, track_gc_object_stats)
-DEFINE_VALUE_IMPLICATION(track_gc_object_stats, gc_stats, 1)
-DEFINE_VALUE_IMPLICATION(trace_gc_object_stats, gc_stats, 1)
-DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
-DEFINE_BOOL(track_detached_contexts, true,
- "track native contexts that are expected to be garbage collected")
-DEFINE_BOOL(trace_detached_contexts, false,
- "trace native contexts that are expected to be garbage collected")
-DEFINE_IMPLICATION(trace_detached_contexts, track_detached_contexts)
-#ifdef VERIFY_HEAP
-DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
-#endif
-DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
-DEFINE_BOOL(memory_reducer, true, "use memory reducer")
-DEFINE_INT(heap_growing_percent, 0,
- "specifies heap growing factor as (1 + heap_growing_percent/100)")
-
-// spaces.cc
-DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
-
// execution.cc, messages.cc
DEFINE_BOOL(clear_exceptions_on_js_entry, false,
"clear pending exceptions when entering JavaScript")
@@ -847,31 +879,18 @@ DEFINE_BOOL(use_idle_notification, true,
// ic.cc
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
+DEFINE_IMPLICATION(trace_ic, log_code)
DEFINE_INT(ic_stats, 0, "inline cache state transitions statistics")
DEFINE_VALUE_IMPLICATION(trace_ic, ic_stats, 1)
+DEFINE_BOOL_READONLY(track_constant_fields, false,
+ "enable constant field tracking")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
"generate extra code for manipulating stats counters")
-// mark-compact.cc
-DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
-DEFINE_BOOL(never_compact, false,
- "Never perform compaction on full GC - testing only")
-DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
-DEFINE_BOOL(cleanup_code_caches_at_gc, true,
- "Flush inline caches prior to mark compact collection and "
- "flush code caches in maps during mark compact cycle.")
-DEFINE_BOOL(use_marking_progress_bar, true,
- "Use a progress bar to scan large objects in increments when "
- "incremental marking is active.")
-DEFINE_BOOL(zap_code_space, DEBUG_BOOL,
- "Zap free memory in code space with 0xCC while sweeping.")
-DEFINE_INT(random_seed, 0,
- "Default seed for initializing random generator "
- "(0, the default, means to use system random).")
-
// objects.cc
+DEFINE_BOOL(thin_strings, false, "Enable ThinString support")
DEFINE_BOOL(trace_weak_arrays, false, "Trace WeakFixedArray usage")
DEFINE_BOOL(trace_prototype_users, false,
"Trace updates to prototype user tracking")
@@ -885,10 +904,13 @@ DEFINE_BOOL(trace_maps, false, "trace map creation")
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing")
DEFINE_BOOL(trace_preparse, false, "trace preparsing decisions")
-DEFINE_BOOL(lazy_inner_functions, false, "enable lazy parsing inner functions")
+DEFINE_BOOL(lazy_inner_functions, true, "enable lazy parsing inner functions")
DEFINE_BOOL(aggressive_lazy_inner_functions, false,
"even lazier inner function parsing")
DEFINE_IMPLICATION(aggressive_lazy_inner_functions, lazy_inner_functions)
+DEFINE_BOOL(preparser_scope_analysis, false,
+ "perform scope analysis for preparsed inner functions")
+DEFINE_IMPLICATION(preparser_scope_analysis, lazy_inner_functions)
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -932,6 +954,9 @@ DEFINE_BOOL(randomize_hashes, true,
DEFINE_INT(hash_seed, 0,
"Fixed seed to use to hash property keys (0 means random)"
"(with snapshots this option cannot override the baked-in seed)")
+DEFINE_INT(random_seed, 0,
+ "Default seed for initializing random generator "
+ "(0, the default, means to use system random).")
DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
DEFINE_BOOL(print_all_exceptions, false,
"print exception object and stack trace on each thrown exception")
@@ -969,23 +994,6 @@ DEFINE_STRING(startup_blob, NULL,
DEFINE_BOOL(profile_hydrogen_code_stub_compilation, false,
"Print the time it takes to lazily compile hydrogen code stubs.")
-// mark-compact.cc
-DEFINE_BOOL(force_marking_deque_overflows, false,
- "force overflows of marking deque by reducing it's size "
- "to 64 words")
-
-DEFINE_BOOL(stress_compaction, false,
- "stress the GC compactor to flush out bugs (implies "
- "--force_marking_deque_overflows)")
-
-DEFINE_BOOL(manual_evacuation_candidates_selection, false,
- "Test mode only flag. It allows an unit test to select evacuation "
- "candidates pages (requires --stress_compaction).")
-
-DEFINE_BOOL(disable_old_api_accessors, false,
- "Disable old-style API accessors whose setters trigger through the "
- "prototype chain")
-
//
// Dev shell flags
//
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index f7ae004ac2..6998d49e17 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -15,6 +15,7 @@
#include "src/list-inl.h"
#include "src/ostreams.h"
#include "src/utils.h"
+#include "src/wasm/wasm-limits.h"
namespace v8 {
namespace internal {
@@ -33,8 +34,15 @@ namespace {
// to the actual flag, default value, comment, etc. This is designed to be POD
// initialized as to avoid requiring static constructors.
struct Flag {
- enum FlagType { TYPE_BOOL, TYPE_MAYBE_BOOL, TYPE_INT, TYPE_FLOAT,
- TYPE_STRING, TYPE_ARGS };
+ enum FlagType {
+ TYPE_BOOL,
+ TYPE_MAYBE_BOOL,
+ TYPE_INT,
+ TYPE_UINT,
+ TYPE_FLOAT,
+ TYPE_STRING,
+ TYPE_ARGS
+ };
FlagType type_; // What type of flag, bool, int, or string.
const char* name_; // Name of the flag, ex "my_flag".
@@ -64,6 +72,11 @@ struct Flag {
return reinterpret_cast<int*>(valptr_);
}
+ unsigned int* uint_variable() const {
+ DCHECK(type_ == TYPE_UINT);
+ return reinterpret_cast<unsigned int*>(valptr_);
+ }
+
double* float_variable() const {
DCHECK(type_ == TYPE_FLOAT);
return reinterpret_cast<double*>(valptr_);
@@ -97,6 +110,11 @@ struct Flag {
return *reinterpret_cast<const int*>(defptr_);
}
+ unsigned int uint_default() const {
+ DCHECK(type_ == TYPE_UINT);
+ return *reinterpret_cast<const unsigned int*>(defptr_);
+ }
+
double float_default() const {
DCHECK(type_ == TYPE_FLOAT);
return *reinterpret_cast<const double*>(defptr_);
@@ -121,6 +139,8 @@ struct Flag {
return maybe_bool_variable()->has_value == false;
case TYPE_INT:
return *int_variable() == int_default();
+ case TYPE_UINT:
+ return *uint_variable() == uint_default();
case TYPE_FLOAT:
return *float_variable() == float_default();
case TYPE_STRING: {
@@ -149,6 +169,9 @@ struct Flag {
case TYPE_INT:
*int_variable() = int_default();
break;
+ case TYPE_UINT:
+ *uint_variable() = uint_default();
+ break;
case TYPE_FLOAT:
*float_variable() = float_default();
break;
@@ -177,6 +200,8 @@ static const char* Type2String(Flag::FlagType type) {
case Flag::TYPE_BOOL: return "bool";
case Flag::TYPE_MAYBE_BOOL: return "maybe_bool";
case Flag::TYPE_INT: return "int";
+ case Flag::TYPE_UINT:
+ return "uint";
case Flag::TYPE_FLOAT: return "float";
case Flag::TYPE_STRING: return "string";
case Flag::TYPE_ARGS: return "arguments";
@@ -199,6 +224,9 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
case Flag::TYPE_INT:
os << *flag.int_variable();
break;
+ case Flag::TYPE_UINT:
+ os << *flag.uint_variable();
+ break;
case Flag::TYPE_FLOAT:
os << *flag.float_variable();
break;
@@ -399,6 +427,24 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
case Flag::TYPE_INT:
*flag->int_variable() = static_cast<int>(strtol(value, &endp, 10));
break;
+ case Flag::TYPE_UINT: {
+ // We do not use strtoul because it accepts negative numbers.
+ int64_t val = static_cast<int64_t>(strtoll(value, &endp, 10));
+ if (val < 0 || val > std::numeric_limits<unsigned int>::max()) {
+ PrintF(stderr,
+ "Error: Value for flag %s of type %s is out of bounds "
+ "[0-%" PRIu64
+ "]\n"
+ "Try --help for options\n",
+ arg, Type2String(flag->type()),
+ static_cast<uint64_t>(
+ std::numeric_limits<unsigned int>::max()));
+ return_code = j;
+ break;
+ }
+ *flag->uint_variable() = static_cast<unsigned int>(val);
+ break;
+ }
case Flag::TYPE_FLOAT:
*flag->float_variable() = strtod(value, &endp);
break;
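
A standalone illustration of the strtoul pitfall the new TYPE_UINT case works
around (editorial sketch; the printed wrapped value assumes a 64-bit
unsigned long):

#include <cstdio>
#include <cstdlib>

int main() {
  char* endp;
  // strtoul() accepts a leading minus and wraps silently: "-1" parses as
  // ULONG_MAX instead of failing.
  unsigned long wrapped = std::strtoul("-1", &endp, 10);
  // Parsing through strtoll keeps the sign, so a simple val < 0 check works.
  long long checked = std::strtoll("-1", &endp, 10);
  std::printf("%lu %lld\n", wrapped, checked);  // 18446744073709551615 -1
  return 0;
}
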
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index c18938c18e..bf1db05295 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -166,16 +166,16 @@ inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
- Object* frame_type =
- Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
- return frame_type == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
+ intptr_t frame_type =
+ Memory::intptr_at(fp + TypedFrameConstants::kFrameTypeOffset);
+ return frame_type == StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
}
inline bool StandardFrame::IsConstructFrame(Address fp) {
- Object* frame_type =
- Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
- return frame_type == Smi::FromInt(StackFrame::CONSTRUCT);
+ intptr_t frame_type =
+ Memory::intptr_at(fp + TypedFrameConstants::kFrameTypeOffset);
+ return frame_type == StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
}
inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 45d26a161a..680a226adf 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -131,16 +131,6 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
// -------------------------------------------------------------------------
-JavaScriptFrameIterator::JavaScriptFrameIterator(Isolate* isolate,
- StackFrame::Id id)
- : iterator_(isolate) {
- while (!done()) {
- Advance();
- if (frame()->id() == id) return;
- }
-}
-
-
void JavaScriptFrameIterator::Advance() {
do {
iterator_.Advance();
@@ -192,6 +182,29 @@ void StackTraceFrameIterator::AdvanceToArgumentsFrame() {
// -------------------------------------------------------------------------
+namespace {
+
+bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
+ Code* interpreter_entry_trampoline =
+ isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+ Code* interpreter_bytecode_advance =
+ isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
+ Code* interpreter_bytecode_dispatch =
+ isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+
+ return (pc >= interpreter_entry_trampoline->instruction_start() &&
+ pc < interpreter_entry_trampoline->instruction_end()) ||
+ (pc >= interpreter_bytecode_advance->instruction_start() &&
+ pc < interpreter_bytecode_advance->instruction_end()) ||
+ (pc >= interpreter_bytecode_dispatch->instruction_start() &&
+ pc < interpreter_bytecode_dispatch->instruction_end());
+}
+
+DISABLE_ASAN Address ReadMemoryAt(Address address) {
+ return Memory::Address_at(address);
+}
+
+} // namespace
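
The DISABLE_ASAN annotation on ReadMemoryAt is, roughly, the following
attribute gate (a sketch of the macro's intent, not its exact definition in
src/base/macros.h); it lets the profiler read a stack slot that
AddressSanitizer may consider poisoned:

// Sketch: suppress ASan instrumentation for a single function.
#if defined(ADDRESS_SANITIZER)
#define DISABLE_ASAN __attribute__((no_sanitize_address))
#else
#define DISABLE_ASAN
#endif
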
SafeStackFrameIterator::SafeStackFrameIterator(
Isolate* isolate,
@@ -204,6 +217,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(
StackFrame::State state;
StackFrame::Type type;
ThreadLocalTop* top = isolate->thread_local_top();
+ bool advance_frame = true;
if (IsValidTop(top)) {
type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
top_frame_type_ = type;
@@ -213,6 +227,19 @@ SafeStackFrameIterator::SafeStackFrameIterator(
state.sp = sp;
state.pc_address = StackFrame::ResolveReturnAddressLocation(
reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp)));
+
+ // If the top of stack is a return address to the interpreter trampoline,
+  // then we are likely in a bytecode handler with an elided frame. In
+  // that case, set the PC properly and make sure we do not drop the frame.
+ if (IsValidStackAddress(sp)) {
+ MSAN_MEMORY_IS_INITIALIZED(sp, kPointerSize);
+ Address tos = ReadMemoryAt(reinterpret_cast<Address>(sp));
+ if (IsInterpreterFramePc(isolate, tos)) {
+ state.pc_address = reinterpret_cast<Address*>(sp);
+ advance_frame = false;
+ }
+ }
+
// StackFrame::ComputeType will read both kContextOffset and kMarkerOffset,
// we check only that kMarkerOffset is within the stack bounds and do
// compile time check that kContextOffset slot is pushed on the stack before
@@ -223,6 +250,10 @@ SafeStackFrameIterator::SafeStackFrameIterator(
if (IsValidStackAddress(frame_marker)) {
type = StackFrame::ComputeType(this, &state);
top_frame_type_ = type;
+      // We only keep the top frame if we believe it to be an interpreted frame.
+ if (type != StackFrame::INTERPRETED) {
+ advance_frame = true;
+ }
} else {
// Mark the frame as JAVA_SCRIPT if we cannot determine its type.
// The frame anyways will be skipped.
@@ -234,7 +265,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(
return;
}
frame_ = SingletonFor(type, &state);
- if (frame_) Advance();
+ if (advance_frame && frame_) Advance();
}
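
Read as a sketch, the new constructor logic is: peek the word at sp; if it
falls inside one of the interpreter builtins' instruction ranges, treat sp as
the PC slot and keep the frame; then discard that decision unless ComputeType
later confirms an INTERPRETED frame. A minimal hedged helper (illustrative
name, not V8 API):

#include <cstdint>

// A top-of-stack word counts as an interpreter return address only if it
// lies inside a builtin's [instruction_start, instruction_end) range.
bool LooksLikeInterpreterPc(uintptr_t tos, uintptr_t start, uintptr_t end) {
  return tos >= start && tos < end;
}
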
@@ -399,22 +430,6 @@ void StackFrame::SetReturnAddressLocationResolver(
return_address_location_resolver_ = resolver;
}
-static bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
- Code* interpreter_entry_trampoline =
- isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- Code* interpreter_bytecode_advance =
- isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
- Code* interpreter_bytecode_dispatch =
- isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
-
- return (pc >= interpreter_entry_trampoline->instruction_start() &&
- pc < interpreter_entry_trampoline->instruction_end()) ||
- (pc >= interpreter_bytecode_advance->instruction_start() &&
- pc < interpreter_bytecode_advance->instruction_end()) ||
- (pc >= interpreter_bytecode_dispatch->instruction_start() &&
- pc < interpreter_bytecode_dispatch->instruction_end());
-}
-
StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
State* state) {
DCHECK(state->fp != NULL);
@@ -422,7 +437,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
MSAN_MEMORY_IS_INITIALIZED(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
kPointerSize);
- Object* marker = Memory::Object_at(
+ intptr_t marker = Memory::intptr_at(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
if (!iterator->can_access_heap_objects_) {
// TODO(titzer): "can_access_heap_objects" is kind of bogus. It really
@@ -434,7 +449,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
state->fp + StandardFrameConstants::kFunctionOffset, kPointerSize);
Object* maybe_function =
Memory::Object_at(state->fp + StandardFrameConstants::kFunctionOffset);
- if (!marker->IsSmi()) {
+ if (!StackFrame::IsTypeMarker(marker)) {
if (maybe_function->IsSmi()) {
return NONE;
} else if (IsInterpreterFramePc(iterator->isolate(),
@@ -451,7 +466,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
if (code_obj != nullptr) {
switch (code_obj->kind()) {
case Code::BUILTIN:
- if (marker->IsSmi()) break;
+ if (StackFrame::IsTypeMarker(marker)) break;
if (code_obj->is_interpreter_trampoline_builtin()) {
return INTERPRETED;
}
@@ -484,9 +499,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
}
- DCHECK(marker->IsSmi());
- StackFrame::Type candidate =
- static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+ DCHECK(StackFrame::IsTypeMarker(marker));
+ StackFrame::Type candidate = StackFrame::MarkerToType(marker);
switch (candidate) {
case ENTRY:
case ENTRY_CONSTRUCT:
@@ -621,8 +635,9 @@ StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
return EXIT;
}
- StackFrame::Type frame_type =
- static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+ intptr_t marker_int = bit_cast<intptr_t>(marker);
+
+ StackFrame::Type frame_type = static_cast<StackFrame::Type>(marker_int >> 1);
if (frame_type == EXIT || frame_type == BUILTIN_EXIT) {
return frame_type;
}
@@ -782,11 +797,10 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Determine the fixed header and spill slot area size.
int frame_header_size = StandardFrameConstants::kFixedFrameSizeFromFp;
- Object* marker =
- Memory::Object_at(fp() + CommonFrameConstants::kContextOrFrameTypeOffset);
- if (marker->IsSmi()) {
- StackFrame::Type candidate =
- static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+ intptr_t marker =
+ Memory::intptr_at(fp() + CommonFrameConstants::kContextOrFrameTypeOffset);
+ if (StackFrame::IsTypeMarker(marker)) {
+ StackFrame::Type candidate = StackFrame::MarkerToType(marker);
switch (candidate) {
case ENTRY:
case ENTRY_CONSTRUCT:
@@ -964,6 +978,16 @@ void JavaScriptFrame::GetFunctions(List<SharedFunctionInfo*>* functions) const {
functions->Add(function()->shared());
}
+void JavaScriptFrame::GetFunctions(
+ List<Handle<SharedFunctionInfo>>* functions) const {
+ DCHECK(functions->length() == 0);
+ List<SharedFunctionInfo*> raw_functions;
+ GetFunctions(&raw_functions);
+ for (const auto& raw_function : raw_functions) {
+ functions->Add(Handle<SharedFunctionInfo>(raw_function));
+ }
+}
+
void JavaScriptFrame::Summarize(List<FrameSummary>* functions,
FrameSummary::Mode mode) const {
DCHECK(functions->length() == 0);
@@ -1263,12 +1287,15 @@ FrameSummary::~FrameSummary() {
#undef FRAME_SUMMARY_DESTR
}
-FrameSummary FrameSummary::Get(const StandardFrame* frame, int index) {
- DCHECK_LE(0, index);
+FrameSummary FrameSummary::GetTop(const StandardFrame* frame) {
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
- DCHECK_GT(frames.length(), index);
- return frames[index];
+ DCHECK_LT(0, frames.length());
+ return frames.last();
+}
+
+FrameSummary FrameSummary::GetBottom(const StandardFrame* frame) {
+ return Get(frame, 0);
}
FrameSummary FrameSummary::GetSingle(const StandardFrame* frame) {
@@ -1278,6 +1305,14 @@ FrameSummary FrameSummary::GetSingle(const StandardFrame* frame) {
return frames.first();
}
+FrameSummary FrameSummary::Get(const StandardFrame* frame, int index) {
+ DCHECK_LE(0, index);
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ frame->Summarize(&frames);
+ DCHECK_GT(frames.length(), index);
+ return frames[index];
+}
+
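
How the reshuffled accessors relate, shown with a plain vector standing in
for V8's List<FrameSummary> (summaries run caller-before-callee, per the
ordering comment added in frames.h below):

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> summaries = {"outer", "inlined_mid", "inlined_inner"};
  assert(summaries.front() == "outer");         // GetBottom == Get(frame, 0)
  assert(summaries.back() == "inlined_inner");  // GetTop == frames.last()
  return 0;
}
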
#define FRAME_SUMMARY_DISPATCH(ret, name) \
ret FrameSummary::name() const { \
switch (base_.kind()) { \
@@ -1752,7 +1787,7 @@ void WasmInterpreterEntryFrame::Iterate(ObjectVisitor* v) const {
void WasmInterpreterEntryFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
PrintIndex(accumulator, mode, index);
- accumulator->Add("WASM TO INTERPRETER [");
+ accumulator->Add("WASM INTERPRETER ENTRY [");
Script* script = this->script();
accumulator->PrintName(script->name());
accumulator->Add("]");
@@ -1761,8 +1796,15 @@ void WasmInterpreterEntryFrame::Print(StringStream* accumulator, PrintMode mode,
void WasmInterpreterEntryFrame::Summarize(List<FrameSummary>* functions,
FrameSummary::Mode mode) const {
- // TODO(clemensh): Implement this.
- UNIMPLEMENTED();
+ Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
+ std::vector<std::pair<uint32_t, int>> interpreted_stack =
+ instance->debug_info()->GetInterpretedStack(fp());
+
+ for (auto& e : interpreted_stack) {
+ FrameSummary::WasmInterpretedFrameSummary summary(isolate(), instance,
+ e.first, e.second);
+ functions->Add(summary);
+ }
}
Code* WasmInterpreterEntryFrame::unchecked_code() const {
@@ -1781,7 +1823,7 @@ Script* WasmInterpreterEntryFrame::script() const {
}
int WasmInterpreterEntryFrame::position() const {
- return FrameSummary::GetFirst(this).AsWasmInterpreted().SourcePosition();
+ return FrameSummary::GetBottom(this).AsWasmInterpreted().SourcePosition();
}
Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 74131e874b..2255b812b7 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -437,10 +437,16 @@ class StackFrame BASE_EMBEDDED {
};
// Used to mark the outermost JS entry frame.
+ //
+ // The mark is an opaque value that should be pushed onto the stack directly,
+ // carefully crafted to not be interpreted as a tagged pointer.
enum JsFrameMarker {
- INNER_JSENTRY_FRAME = 0,
- OUTERMOST_JSENTRY_FRAME = 1
+ INNER_JSENTRY_FRAME = (0 << kSmiTagSize) | kSmiTag,
+ OUTERMOST_JSENTRY_FRAME = (1 << kSmiTagSize) | kSmiTag
};
+ STATIC_ASSERT((INNER_JSENTRY_FRAME & kHeapObjectTagMask) != kHeapObjectTag);
+ STATIC_ASSERT((OUTERMOST_JSENTRY_FRAME & kHeapObjectTagMask) !=
+ kHeapObjectTag);
struct State {
Address sp = nullptr;
@@ -450,6 +456,40 @@ class StackFrame BASE_EMBEDDED {
Address* constant_pool_address = nullptr;
};
+ // Convert a stack frame type to a marker that can be stored on the stack.
+ //
+ // The marker is an opaque value, not intended to be interpreted in any way
+ // except being checked by IsTypeMarker or converted by MarkerToType.
+ // It has the same tagging as Smis, so any marker value that does not pass
+ // IsTypeMarker can instead be interpreted as a tagged pointer.
+ //
+ // Note that the marker is not a Smi: Smis on 64-bit architectures are stored
+ // in the top 32 bits of a 64-bit value, which in turn makes them expensive
+ // (in terms of code/instruction size) to push as immediates onto the stack.
+ static int32_t TypeToMarker(Type type) {
+ DCHECK_GE(type, 0);
+ return (type << kSmiTagSize) | kSmiTag;
+ }
+
+ // Convert a marker back to a stack frame type.
+ //
+ // Unlike the return value of TypeToMarker, this takes an intptr_t, as that is
+ // the type of the value on the stack.
+ static Type MarkerToType(intptr_t marker) {
+ DCHECK(IsTypeMarker(marker));
+ return static_cast<Type>(marker >> kSmiTagSize);
+ }
+
+ // Check if a marker is a stack frame type marker or a tagged pointer.
+ //
+ // Returns true if the given marker is tagged as a stack frame type marker,
+ // and should be converted back to a stack frame type using MarkerToType.
+ // Otherwise, the value is a tagged function pointer.
+ static bool IsTypeMarker(intptr_t function_or_marker) {
+ bool is_marker = ((function_or_marker & kSmiTagMask) == kSmiTag);
+ return is_marker;
+ }
+
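
A self-contained sketch of the marker scheme above, assuming V8's
conventional tag values (kSmiTag == 0, kSmiTagSize == 1, heap pointers
carrying a low tag bit of 1 — stated here as assumptions; the frame-type
value 7 is arbitrary):

#include <cassert>
#include <cstdint>

constexpr intptr_t kSmiTag = 0;
constexpr int kSmiTagSize = 1;
constexpr intptr_t kSmiTagMask = (intptr_t{1} << kSmiTagSize) - 1;

int32_t TypeToMarker(int type) { return (type << kSmiTagSize) | kSmiTag; }
bool IsTypeMarker(intptr_t m) { return (m & kSmiTagMask) == kSmiTag; }
int MarkerToType(intptr_t m) { return static_cast<int>(m >> kSmiTagSize); }

int main() {
  intptr_t marker = TypeToMarker(7);
  assert(IsTypeMarker(marker) && MarkerToType(marker) == 7);
  intptr_t heap_ptr = 0x1000 | 1;   // HeapObject pointers have the low bit set
  assert(!IsTypeMarker(heap_ptr));  // read as a tagged function pointer instead
  return 0;
}
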
// Copy constructor; it breaks the connection to host iterator
// (as an iterator usually lives on stack).
StackFrame(const StackFrame& original) {
@@ -858,11 +898,10 @@ class FrameSummary BASE_EMBEDDED {
~FrameSummary();
- static inline FrameSummary GetFirst(const StandardFrame* frame) {
- return Get(frame, 0);
- }
- static FrameSummary Get(const StandardFrame* frame, int index);
+ static FrameSummary GetTop(const StandardFrame* frame);
+ static FrameSummary GetBottom(const StandardFrame* frame);
static FrameSummary GetSingle(const StandardFrame* frame);
+ static FrameSummary Get(const StandardFrame* frame, int index);
// Dispatched accessors.
Handle<Object> receiver() const;
@@ -924,6 +963,8 @@ class StandardFrame : public StackFrame {
virtual bool IsConstructor() const;
// Build a list with summaries for this frame including all inlined frames.
+ // The functions are ordered bottom-to-top (i.e. summaries.last() is the
+ // top-most activation; caller comes before callee).
virtual void Summarize(
List<FrameSummary>* frames,
FrameSummary::Mode mode = FrameSummary::kExactSummary) const;
@@ -1028,6 +1069,8 @@ class JavaScriptFrame : public StandardFrame {
// Return a list with {SharedFunctionInfo} objects of this frame.
virtual void GetFunctions(List<SharedFunctionInfo*>* functions) const;
+ void GetFunctions(List<Handle<SharedFunctionInfo>>* functions) const;
+
// Lookup exception handler for current {pc}, returns -1 if none found. Also
// returns data associated with the handler site specific to the frame type:
// - OptimizedFrame : Data is the stack slot count of the entire frame.
@@ -1461,8 +1504,6 @@ class JavaScriptFrameIterator BASE_EMBEDDED {
public:
inline explicit JavaScriptFrameIterator(Isolate* isolate);
inline JavaScriptFrameIterator(Isolate* isolate, ThreadLocalTop* top);
- // Skip frames until the frame with the given id is reached.
- JavaScriptFrameIterator(Isolate* isolate, StackFrame::Id id);
inline JavaScriptFrame* frame() const;
@@ -1484,6 +1525,7 @@ class JavaScriptFrameIterator BASE_EMBEDDED {
class StackTraceFrameIterator BASE_EMBEDDED {
public:
explicit StackTraceFrameIterator(Isolate* isolate);
+ // Skip frames until the frame with the given id is reached.
StackTraceFrameIterator(Isolate* isolate, StackFrame::Id id);
bool done() const { return iterator_.done(); }
void Advance();
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index e4e4b3a8a4..d650f2e47b 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -132,11 +132,11 @@ void FullCodeGenerator::Generate() {
// Increment invocation count for the function.
{
Comment cmnt(masm_, "[ Increment invocation count");
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
- __ ldr(r2, FieldMemOperand(r2, LiteralsArray::kFeedbackVectorOffset));
- __ ldr(r9, FieldMemOperand(r2, FeedbackVector::kInvocationCountIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
+ __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
+ __ ldr(r9, FieldMemOperand(
+ r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ add(r9, r9, Operand(Smi::FromInt(1)));
__ str(r9, FieldMemOperand(
r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
@@ -271,14 +271,16 @@ void FullCodeGenerator::Generate() {
__ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- FastNewStrictArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(r1);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- FastNewSloppyArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
}
SetVar(arguments, r0, r1, r2);
@@ -310,10 +312,12 @@ void FullCodeGenerator::Generate() {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ masm_->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm_);
predictable.ExpectSize(
masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
- __ Call(stack_check, RelocInfo::CODE_TARGET);
+ __ Call(stack_check, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
+ CAN_INLINE_TARGET_ADDRESS, false);
__ bind(&ok);
}
@@ -757,10 +761,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
globals_->Add(isolate()->factory()->undefined_value(), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
}
case VariableLocation::PARAMETER:
@@ -797,9 +802,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+ // We need the slot where the literals array lives, too.
+ slot = declaration->fun()->LiteralFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -951,7 +962,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
SetStatementPosition(stmt, SKIP_BREAK);
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1118,9 +1129,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
decrement_loop_depth();
}
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ ldr(StoreDescriptor::ValueRegister(),
@@ -1128,10 +1138,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), r0);
__ ldr(StoreDescriptor::ValueRegister(),
@@ -1206,10 +1215,10 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r2, Operand(SmiFromSlot(expr->literal_slot())));
__ mov(r1, Operand(constant_properties));
int flags = expr->ComputeFlags();
__ mov(r0, Operand(Smi::FromInt(flags)));
@@ -1256,7 +1265,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(r0));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- CallStoreIC(property->GetSlot(0), key->value());
+ CallStoreIC(property->GetSlot(0), key->value(), true);
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1340,18 +1349,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<ConstantElementsPair> constant_elements =
expr->GetOrBuildConstantElements(isolate());
- bool has_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r2, Operand(SmiFromSlot(expr->literal_slot())));
__ mov(r1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ mov(r0, Operand(Smi::FromInt(expr->ComputeFlags())));
@@ -1359,7 +1359,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
Callable callable =
- CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1664,9 +1664,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
context()->Plug(r0);
}
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -1720,7 +1718,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
@@ -1884,8 +1882,9 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
EmitProfilingCounterHandlingForReturnSequence(true);
}
Handle<Code> code =
- CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
- __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
+ CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+ .code();
+ __ mov(r3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ mov(r0, Operand(arg_count));
CallIC(code);
@@ -2602,16 +2601,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
__ tst(r1, Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(check, factory->type##_string())) { \
- __ JumpIfSmi(r0, if_false); \
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); \
- __ CompareRoot(r0, Heap::k##Type##MapRootIndex); \
- Split(eq, if_true, if_false, fall_through);
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -2652,6 +2641,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
SetExpressionPosition(expr);
PopOperand(r1);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+ RestoreContext();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index a464e72fe0..f6b9c2f389 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -136,11 +136,11 @@ void FullCodeGenerator::Generate() {
// Increment invocation count for the function.
{
Comment cmnt(masm_, "[ Increment invocation count");
- __ Ldr(x11, FieldMemOperand(x1, JSFunction::kLiteralsOffset));
- __ Ldr(x11, FieldMemOperand(x11, LiteralsArray::kFeedbackVectorOffset));
- __ Ldr(x10, FieldMemOperand(x11, FeedbackVector::kInvocationCountIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ Ldr(x11, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
+ __ Ldr(x10, FieldMemOperand(
+ x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ Add(x10, x10, Operand(Smi::FromInt(1)));
__ Str(x10, FieldMemOperand(
x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
@@ -273,14 +273,16 @@ void FullCodeGenerator::Generate() {
__ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- FastNewStrictArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(x1);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- FastNewSloppyArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
}
SetVar(arguments, x0, x1, x2);
@@ -752,10 +754,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
globals_->Add(isolate()->factory()->undefined_value(), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
}
case VariableLocation::PARAMETER:
@@ -792,9 +795,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+ // We need the slot where the literals array lives, too.
+ slot = declaration->fun()->LiteralFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack overflow exception.
@@ -947,7 +956,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
SetStatementPosition(stmt, SKIP_BREAK);
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
// TODO(all): This visitor probably needs better comments and a revisit.
@@ -1107,19 +1116,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
decrement_loop_depth();
}
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), x0);
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
@@ -1193,10 +1200,10 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Mov(x2, Smi::FromInt(expr->literal_index()));
+ __ Mov(x2, SmiFromSlot(expr->literal_slot()));
__ Mov(x1, Operand(constant_properties));
int flags = expr->ComputeFlags();
__ Mov(x0, Smi::FromInt(flags));
@@ -1243,7 +1250,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(x0));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
- CallStoreIC(property->GetSlot(0), key->value());
+ CallStoreIC(property->GetSlot(0), key->value(), true);
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1325,18 +1332,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<ConstantElementsPair> constant_elements =
expr->GetOrBuildConstantElements(isolate());
- bool has_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Mov(x2, Smi::FromInt(expr->literal_index()));
+ __ Mov(x2, SmiFromSlot(expr->literal_slot()));
__ Mov(x1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ Mov(x0, Smi::FromInt(expr->ComputeFlags()));
@@ -1344,7 +1342,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
Callable callable =
- CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1608,8 +1606,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
context()->Plug(x0);
}
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -1665,7 +1662,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode) {
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
if (var->IsUnallocated()) {
@@ -1836,8 +1833,9 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
EmitProfilingCounterHandlingForReturnSequence(true);
}
Handle<Code> code =
- CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
- __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
+ CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+ .code();
+ __ Mov(x3, IntFromSlot(expr->CallFeedbackICSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ Mov(x0, arg_count);
CallIC(code);
@@ -2571,18 +2569,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
__ TestAndSplit(x10, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable),
if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(check, factory->type##_string())) { \
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof " \
- #type "_string"); \
- __ JumpIfSmi(x0, if_true); \
- __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset)); \
- __ CompareRoot(x0, Heap::k##Type##MapRootIndex); \
- Split(eq, if_true, if_false, fall_through);
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
} else {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other");
if (if_false != fall_through) __ B(if_false);
@@ -2627,6 +2613,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
SetExpressionPosition(expr);
PopOperand(x1);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+ RestoreContext();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index 5cfc63bb63..58872d08ab 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -204,8 +204,7 @@ void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
__ Call(code, RelocInfo::CODE_TARGET, ast_id);
}
-void FullCodeGenerator::CallLoadIC(FeedbackVectorSlot slot,
- Handle<Object> name) {
+void FullCodeGenerator::CallLoadIC(FeedbackSlot slot, Handle<Object> name) {
DCHECK(name->IsName());
__ Move(LoadDescriptor::NameRegister(), name);
@@ -216,8 +215,8 @@ void FullCodeGenerator::CallLoadIC(FeedbackVectorSlot slot,
RestoreContext();
}
-void FullCodeGenerator::CallStoreIC(FeedbackVectorSlot slot,
- Handle<Object> name) {
+void FullCodeGenerator::CallStoreIC(FeedbackSlot slot, Handle<Object> name,
+ bool store_own_property) {
DCHECK(name->IsName());
__ Move(StoreDescriptor::NameRegister(), name);
@@ -230,12 +229,23 @@ void FullCodeGenerator::CallStoreIC(FeedbackVectorSlot slot,
EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
}
- Handle<Code> code = CodeFactory::StoreIC(isolate(), language_mode()).code();
+ Handle<Code> code;
+ if (store_own_property) {
+ DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+ feedback_vector_spec()->GetKind(slot));
+ code = CodeFactory::StoreOwnIC(isolate()).code();
+ } else {
+ // Ensure that language mode is in sync with the IC slot kind.
+ DCHECK_EQ(
+ GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+ language_mode());
+ code = CodeFactory::StoreIC(isolate(), language_mode()).code();
+ }
__ Call(code, RelocInfo::CODE_TARGET);
RestoreContext();
}
-void FullCodeGenerator::CallKeyedStoreIC(FeedbackVectorSlot slot) {
+void FullCodeGenerator::CallKeyedStoreIC(FeedbackSlot slot) {
STATIC_ASSERT(!StoreDescriptor::kPassLastArgsOnStack ||
StoreDescriptor::kStackArgumentsCount == 2);
if (StoreDescriptor::kPassLastArgsOnStack) {
@@ -245,6 +255,9 @@ void FullCodeGenerator::CallKeyedStoreIC(FeedbackVectorSlot slot) {
EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
}
+ // Ensure that language mode is in sync with the IC slot kind.
+ DCHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+ language_mode());
Handle<Code> code =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
__ Call(code, RelocInfo::CODE_TARGET);
@@ -479,8 +492,12 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
DCHECK(var->IsUnallocated());
__ Move(LoadDescriptor::NameRegister(), var->name());
- EmitLoadSlot(LoadGlobalDescriptor::SlotRegister(),
- proxy->VariableFeedbackSlot());
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
+ // Ensure that typeof mode is in sync with the IC slot kind.
+ DCHECK_EQ(GetTypeofModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+ typeof_mode);
+
+ EmitLoadSlot(LoadGlobalDescriptor::SlotRegister(), slot);
Handle<Code> code = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
__ Call(code, RelocInfo::CODE_TARGET);
RestoreContext();
@@ -553,21 +570,6 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- OperandStackDepthDecrement(4);
- context()->Plug(result_register());
-}
-
-
void FullCodeGenerator::EmitIntrinsicAsStubCall(CallRuntime* expr,
const Callable& callable) {
ZoneList<Expression*>* args = expr->arguments();
@@ -599,10 +601,6 @@ void FullCodeGenerator::EmitIntrinsicAsStubCall(CallRuntime* expr,
context()->Plug(result_register());
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- EmitIntrinsicAsStubCall(expr, CodeFactory::NumberToString(isolate()));
-}
-
void FullCodeGenerator::EmitToString(CallRuntime* expr) {
EmitIntrinsicAsStubCall(expr, CodeFactory::ToString(isolate()));
@@ -1016,8 +1014,7 @@ void FullCodeGenerator::EmitUnwindAndReturn() {
}
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- FeedbackVectorSlot slot,
- bool pretenure) {
+ FeedbackSlot slot, bool pretenure) {
// If slot is invalid, then it's a native function literal and we
// can pass the empty array or empty literal array, something like that...
@@ -1062,13 +1059,12 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
RestoreContext();
}
-void FullCodeGenerator::EmitLoadSlot(Register destination,
- FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitLoadSlot(Register destination, FeedbackSlot slot) {
DCHECK(!slot.IsInvalid());
__ Move(destination, SmiFromSlot(slot));
}
-void FullCodeGenerator::EmitPushSlot(FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitPushSlot(FeedbackSlot slot) {
__ Push(SmiFromSlot(slot));
}
@@ -1227,13 +1223,8 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
- Comment cmnt(masm_, "[ DebuggerStatement");
- SetStatementPosition(stmt);
-
- __ DebugBreak();
- // Ignore the return value.
-
- PrepareForBailoutForId(stmt->DebugBreakId(), BailoutState::NO_REGISTERS);
+ // Debugger statement is not supported.
+ UNREACHABLE();
}
@@ -1306,7 +1297,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
LoadFromFrameField(JavaScriptFrameConstants::kFunctionOffset,
descriptor.GetRegisterParameter(0));
__ Move(descriptor.GetRegisterParameter(1),
- Smi::FromInt(expr->literal_index()));
+ SmiFromSlot(expr->literal_slot()));
__ Move(descriptor.GetRegisterParameter(2), expr->pattern());
__ Move(descriptor.GetRegisterParameter(3), Smi::FromInt(expr->flags()));
__ Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -1604,6 +1595,10 @@ bool FullCodeGenerator::has_simple_parameters() {
FunctionLiteral* FullCodeGenerator::literal() const { return info_->literal(); }
+const FeedbackVectorSpec* FullCodeGenerator::feedback_vector_spec() const {
+ return literal()->feedback_vector_spec();
+}
+
#undef __
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 642b095c8b..58a9b9a813 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -16,6 +16,7 @@
#include "src/deoptimizer.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/source-position-table.h"
namespace v8 {
namespace internal {
@@ -341,11 +342,16 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
void PrepareForBailout(Expression* node, Deoptimizer::BailoutState state);
void PrepareForBailoutForId(BailoutId id, Deoptimizer::BailoutState state);
+ // Returns an int32 for the index into the FixedArray that backs the feedback
+ // vector
+ int32_t IntFromSlot(FeedbackSlot slot) const {
+ return FeedbackVector::GetIndex(slot);
+ }
+
// Returns a smi for the index into the FixedArray that backs the feedback
// vector
- Smi* SmiFromSlot(FeedbackVectorSlot slot) const {
- return Smi::FromInt(FeedbackVector::GetIndexFromSpec(
- literal()->feedback_vector_spec(), slot));
+ Smi* SmiFromSlot(FeedbackSlot slot) const {
+ return Smi::FromInt(IntFromSlot(slot));
}
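
The two helpers agree by construction: the Smi form is just the tagged
encoding of the same backing-array index. A tiny sketch, assuming the 31-bit
Smi layout where Smi::FromInt(i) encodes i << 1 (on 64-bit targets the index
instead lands in the upper 32 bits, as the frames.h comment notes):

#include <cassert>
#include <cstdint>

int32_t SmiEncode(int32_t i) { return i << 1; }  // 31-bit Smi sketch (assumption)

int main() {
  int index = 5;                   // what IntFromSlot would return
  assert(SmiEncode(index) == 10);  // the corresponding SmiFromSlot encoding
  return 0;
}
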
// Record a call's return site offset, used to rebuild the frame if the
@@ -407,9 +413,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
F(ClassOf) \
F(StringCharCodeAt) \
F(SubString) \
- F(RegExpExec) \
F(ToInteger) \
- F(NumberToString) \
F(ToString) \
F(ToLength) \
F(ToNumber) \
@@ -438,7 +442,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// Platform-specific support for allocating a new closure based on
// the given function info.
- void EmitNewClosure(Handle<SharedFunctionInfo> info, FeedbackVectorSlot slot,
+ void EmitNewClosure(Handle<SharedFunctionInfo> info, FeedbackSlot slot,
bool pretenure);
// Re-usable portions of CallRuntime
@@ -467,12 +471,11 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// Assign to the given expression as if via '='. The right-hand-side value
// is expected in the accumulator. slot is only used if FLAG_vector_stores
// is true.
- void EmitAssignment(Expression* expr, FeedbackVectorSlot slot);
+ void EmitAssignment(Expression* expr, FeedbackSlot slot);
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
- void EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot,
+ void EmitVariableAssignment(Variable* var, Token::Value op, FeedbackSlot slot,
HoleCheckMode hole_check_mode);
// Helper functions to EmitVariableAssignment
@@ -496,22 +499,23 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// The value of the initializer is expected to be at the top of the stack.
// |offset| is the offset in the stack where the home object can be found.
void EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorSlot slot);
+ FeedbackSlot slot);
void EmitSetHomeObjectAccumulator(Expression* initializer, int offset,
- FeedbackVectorSlot slot);
+ FeedbackSlot slot);
// Platform-specific code for loading a slot to a register.
- void EmitLoadSlot(Register destination, FeedbackVectorSlot slot);
+ void EmitLoadSlot(Register destination, FeedbackSlot slot);
// Platform-specific code for pushing a slot to the stack.
- void EmitPushSlot(FeedbackVectorSlot slot);
+ void EmitPushSlot(FeedbackSlot slot);
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
- void CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name);
- void CallStoreIC(FeedbackVectorSlot slot, Handle<Object> name);
- void CallKeyedStoreIC(FeedbackVectorSlot slot);
+ void CallLoadIC(FeedbackSlot slot, Handle<Object> name);
+ void CallStoreIC(FeedbackSlot slot, Handle<Object> name,
+ bool store_own_property = false);
+ void CallKeyedStoreIC(FeedbackSlot slot);
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
@@ -565,6 +569,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
LanguageMode language_mode();
bool has_simple_parameters();
FunctionLiteral* literal() const;
+ const FeedbackVectorSpec* feedback_vector_spec() const;
Scope* scope() { return scope_; }
static Register context_register();
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index 739fa0d5d4..87db6f18a4 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -121,12 +121,12 @@ void FullCodeGenerator::Generate() {
// Increment invocation count for the function.
{
Comment cmnt(masm_, "[ Increment invocation count");
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ mov(ecx, FieldOperand(ecx, LiteralsArray::kFeedbackVectorOffset));
- __ add(FieldOperand(
- ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
- Immediate(Smi::FromInt(1)));
+ __ mov(ecx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
+ __ mov(ecx, FieldOperand(ecx, Cell::kValueOffset));
+ __ add(
+ FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize),
+ Immediate(Smi::FromInt(1)));
}
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -263,14 +263,16 @@ void FullCodeGenerator::Generate() {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- FastNewStrictArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->FastNewStrictArguments(),
+ RelocInfo::CODE_TARGET);
+ RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(edi);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- FastNewSloppyArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->FastNewSloppyArguments(),
+ RelocInfo::CODE_TARGET);
+ RestoreContext();
}
SetVar(arguments, eax, ebx, edx);
@@ -705,10 +707,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
globals_->Add(isolate()->factory()->undefined_value(), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
}
case VariableLocation::PARAMETER:
@@ -745,9 +748,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+ // We need the slot where the literals array lives, too.
+ slot = declaration->fun()->LiteralFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -896,7 +905,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
SetStatementPosition(stmt, SKIP_BREAK);
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1048,19 +1057,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
decrement_loop_depth();
}
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), eax);
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
@@ -1132,20 +1139,20 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
int flags = expr->ComputeFlags();
// If any of the keys would store to the elements array, then we shouldn't
// allow it.
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(SmiFromSlot(expr->literal_slot())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ebx, Immediate(SmiFromSlot(expr->literal_slot())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
Callable callable = CodeFactory::FastCloneShallowObject(
@@ -1187,7 +1194,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(eax));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- CallStoreIC(property->GetSlot(0), key->value());
+ CallStoreIC(property->GetSlot(0), key->value(), true);
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1264,28 +1271,19 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<ConstantElementsPair> constant_elements =
expr->GetOrBuildConstantElements(isolate());
- bool has_constant_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(SmiFromSlot(expr->literal_slot())));
__ push(Immediate(constant_elements));
__ push(Immediate(Smi::FromInt(expr->ComputeFlags())));
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ebx, Immediate(SmiFromSlot(expr->literal_slot())));
__ mov(ecx, Immediate(constant_elements));
Callable callable =
- CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1589,9 +1587,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
context()->Plug(eax);
}
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -1643,7 +1639,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
@@ -1808,8 +1804,9 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
EmitProfilingCounterHandlingForReturnSequence(true);
}
Handle<Code> code =
- CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
- __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
+ CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+ .code();
+ __ Move(edx, Immediate(IntFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Move(eax, Immediate(arg_count));
CallIC(code);
@@ -2536,16 +2533,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(zero, if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(check, factory->type##_string())) { \
- __ JumpIfSmi(eax, if_false); \
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), \
- isolate()->factory()->type##_map()); \
- Split(equal, if_true, if_false, fall_through);
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -2586,6 +2573,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
SetExpressionPosition(expr);
PopOperand(edx);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+ RestoreContext();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
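
Across every architecture in this patch, the invocation-count prologue stops loading through JSFunction::kLiteralsOffset and LiteralsArray::kFeedbackVectorOffset and instead loads JSFunction::kFeedbackVectorOffset followed by Cell::kValueOffset. The standalone C++ below is a toy model of that two-load pointer chase, not V8 code; the struct layout and field names are illustrative assumptions that mirror the offsets named in the hunks.

#include <cstdio>

struct FeedbackVector { int invocation_count; };  // kept as a Smi in V8
struct Cell { FeedbackVector* value; };           // Cell::kValueOffset
struct JSFunction { Cell* feedback_vector; };     // JSFunction::kFeedbackVectorOffset

int main() {
  FeedbackVector vector{0};
  Cell cell{&vector};
  JSFunction fn{&cell};
  // The emitted prologue performs exactly this double load before adding
  // Smi::FromInt(1) to the invocation count.
  fn.feedback_vector->value->invocation_count += 1;
  std::printf("invocation_count = %d\n", vector.invocation_count);
}
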
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index 451d2fa8d0..cfc9952d08 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -141,8 +141,8 @@ void FullCodeGenerator::Generate() {
// Increment invocation count for the function.
{
Comment cmnt(masm_, "[ Increment invocation count");
- __ lw(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
- __ lw(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+ __ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+ __ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ lw(t0, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
@@ -281,14 +281,16 @@ void FullCodeGenerator::Generate() {
__ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- FastNewStrictArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(a1);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- FastNewSloppyArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
}
SetVar(arguments, v0, a1, a2);
@@ -756,10 +758,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
globals_->Add(isolate()->factory()->undefined_value(), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
}
case VariableLocation::PARAMETER:
@@ -796,9 +799,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+ // We need the slot where the literals array lives, too.
+ slot = declaration->fun()->LiteralFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -949,7 +958,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
SetStatementPosition(stmt, SKIP_BREAK);
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1117,9 +1126,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
decrement_loop_depth();
}
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ lw(StoreDescriptor::ValueRegister(),
@@ -1127,10 +1135,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), v0);
__ lw(StoreDescriptor::ValueRegister(),
@@ -1206,10 +1213,10 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a2, Operand(SmiFromSlot(expr->literal_slot())));
__ li(a1, Operand(constant_properties));
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
if (MustCreateObjectLiteralWithRuntime(expr)) {
@@ -1256,7 +1263,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::ValueRegister(), result_register());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- CallStoreIC(property->GetSlot(0), key->value());
+ CallStoreIC(property->GetSlot(0), key->value(), true);
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1339,19 +1346,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<ConstantElementsPair> constant_elements =
expr->GetOrBuildConstantElements(isolate());
- bool has_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
__ mov(a0, result_register());
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a2, Operand(SmiFromSlot(expr->literal_slot())));
__ li(a1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
@@ -1359,7 +1357,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
Callable callable =
- CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1677,9 +1675,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
context()->Plug(v0);
}
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -1733,7 +1729,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
@@ -1906,8 +1902,9 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
EmitProfilingCounterHandlingForReturnSequence(true);
}
Handle<Code> code =
- CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
- __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
+ CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+ .code();
+ __ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
CallIC(code);
@@ -2628,16 +2625,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ And(a1, a1,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(check, factory->type##_string())) { \
- __ JumpIfSmi(v0, if_false); \
- __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); \
- __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
- Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -2679,6 +2666,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ mov(a0, result_register());
PopOperand(a1);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+ RestoreContext();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
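
The VisitVariableDeclaration and VisitFunctionDeclaration hunks repeated across these files now record two feedback slots per global declaration: variable declarations pad the second position with undefined, while function declarations fill it from declaration->fun()->LiteralFeedbackSlot(). The sketch below models the resulting record; the struct name and the slot numbers are hypothetical, only the four-entry layout comes from the hunks.

#include <iostream>
#include <string>
#include <vector>

// Hypothetical record; only the layout is taken from the diff.
struct GlobalDeclarationRecord {
  std::string name;
  int variable_feedback_slot;  // proxy->VariableFeedbackSlot()
  int literal_feedback_slot;   // declaration->fun()->LiteralFeedbackSlot()
  std::string value;           // SharedFunctionInfo, or undefined for vars
};

int main() {
  std::vector<GlobalDeclarationRecord> globals;
  // `var x;` pads both the literal slot and the value with undefined,
  // matching the extra globals_->Add(undefined_value()) call in the diff.
  globals.push_back({"x", 4, -1, "undefined"});
  // `function f() {}` now records the closure's literal slot as well.
  globals.push_back({"f", 6, 7, "SharedFunctionInfo"});
  for (const auto& g : globals) {
    std::cout << g.name << ": slots " << g.variable_feedback_slot << ", "
              << g.literal_feedback_slot << " -> " << g.value << "\n";
  }
}
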
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index 2d14f5b334..37e2d8037c 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -140,8 +140,8 @@ void FullCodeGenerator::Generate() {
// Increment invocation count for the function.
{
Comment cmnt(masm_, "[ Increment invocation count");
- __ ld(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
- __ ld(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+ __ ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+ __ ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ ld(a4, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
@@ -280,14 +280,16 @@ void FullCodeGenerator::Generate() {
__ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- FastNewStrictArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(a1);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- FastNewSloppyArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
}
SetVar(arguments, v0, a1, a2);
@@ -756,10 +758,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
globals_->Add(isolate()->factory()->undefined_value(), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
}
case VariableLocation::PARAMETER:
@@ -796,9 +799,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+ // We need the slot where the literals array lives, too.
+ slot = declaration->fun()->LiteralFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -949,7 +958,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
SetStatementPosition(stmt, SKIP_BREAK);
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
@@ -1119,9 +1128,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
decrement_loop_depth();
}
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ ld(StoreDescriptor::ValueRegister(),
@@ -1129,10 +1137,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), v0);
__ ld(StoreDescriptor::ValueRegister(),
@@ -1208,10 +1215,10 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
__ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a2, Operand(SmiFromSlot(expr->literal_slot())));
__ li(a1, Operand(constant_properties));
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
if (MustCreateObjectLiteralWithRuntime(expr)) {
@@ -1258,7 +1265,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::ValueRegister(), result_register());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- CallStoreIC(property->GetSlot(0), key->value());
+ CallStoreIC(property->GetSlot(0), key->value(), true);
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1341,19 +1348,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<ConstantElementsPair> constant_elements =
expr->GetOrBuildConstantElements(isolate());
- bool has_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
__ mov(a0, result_register());
__ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a2, Operand(SmiFromSlot(expr->literal_slot())));
__ li(a1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
@@ -1361,7 +1359,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
Callable callable =
- CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1678,9 +1676,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
context()->Plug(v0);
}
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -1734,7 +1730,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
@@ -1907,8 +1903,9 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
EmitProfilingCounterHandlingForReturnSequence(true);
}
Handle<Code> code =
- CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
- __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
+ CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+ .code();
+ __ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
CallIC(code);
@@ -2629,16 +2626,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ And(a1, a1,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(check, factory->type##_string())) { \
- __ JumpIfSmi(v0, if_false); \
- __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); \
- __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
- Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -2680,6 +2667,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ mov(a0, result_register());
PopOperand(a1);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+ RestoreContext();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(a4, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
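
In EmitCall, the CallIC stub becomes a CallICTrampoline and the slot argument switches from SmiFromSlot to IntFromSlot on most targets (the x87 hunk below keeps SmiFromSlot), meaning the trampoline receives the raw slot index rather than a tagged Smi. A minimal model of that tagging difference; the one-bit tag here is an illustrative assumption, since the real Smi tag width is platform-dependent.

#include <cstdint>
#include <cstdio>

constexpr intptr_t SmiFromSlot(int slot) {
  return static_cast<intptr_t>(slot) << 1;  // tagged, as the old stub took
}
constexpr int IntFromSlot(int slot) {
  return slot;  // raw index, as the trampoline now takes
}

int main() {
  const int slot = 5;
  std::printf("tagged Smi: %jd, raw index: %d\n",
              static_cast<intmax_t>(SmiFromSlot(slot)), IntFromSlot(slot));
}
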
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index 46d86d83f9..bd69582cda 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -137,11 +137,11 @@ void FullCodeGenerator::Generate() {
// Increment invocation count for the function.
{
Comment cmnt(masm_, "[ Increment invocation count");
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
- __ LoadP(r7, FieldMemOperand(r7, LiteralsArray::kFeedbackVectorOffset));
- __ LoadP(r8, FieldMemOperand(r7, FeedbackVector::kInvocationCountIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(r7, FieldMemOperand(r7, Cell::kValueOffset));
+ __ LoadP(r8, FieldMemOperand(
+ r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
__ StoreP(r8, FieldMemOperand(
r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
@@ -278,14 +278,16 @@ void FullCodeGenerator::Generate() {
__ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- FastNewStrictArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(r4);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- FastNewSloppyArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
}
SetVar(arguments, r3, r4, r5);
@@ -724,10 +726,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
globals_->Add(isolate()->factory()->undefined_value(), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
}
case VariableLocation::PARAMETER:
@@ -764,9 +767,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+ // We need the slot where the literals array lives, too.
+ slot = declaration->fun()->LiteralFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -914,7 +923,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
SetStatementPosition(stmt, SKIP_BREAK);
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1088,9 +1097,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
decrement_loop_depth();
}
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ LoadP(StoreDescriptor::ValueRegister(),
@@ -1098,10 +1106,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), r3);
__ LoadP(StoreDescriptor::ValueRegister(),
@@ -1176,10 +1183,10 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
__ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+ __ LoadSmiLiteral(r5, SmiFromSlot(expr->literal_slot()));
__ mov(r4, Operand(constant_properties));
int flags = expr->ComputeFlags();
__ LoadSmiLiteral(r3, Smi::FromInt(flags));
@@ -1226,7 +1233,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(r3));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- CallStoreIC(property->GetSlot(0), key->value());
+ CallStoreIC(property->GetSlot(0), key->value(), true);
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1308,18 +1315,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<ConstantElementsPair> constant_elements =
expr->GetOrBuildConstantElements(isolate());
- bool has_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
__ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+ __ LoadSmiLiteral(r5, SmiFromSlot(expr->literal_slot()));
__ mov(r4, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ LoadSmiLiteral(r3, Smi::FromInt(expr->ComputeFlags()));
@@ -1327,7 +1325,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
Callable callable =
- CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1676,9 +1674,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
context()->Plug(r3);
}
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -1732,7 +1728,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
@@ -1896,8 +1892,9 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
EmitProfilingCounterHandlingForReturnSequence(true);
}
Handle<Code> code =
- CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
- __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
+ CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+ .code();
+ __ mov(r6, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r3, Operand(arg_count));
CallIC(code);
@@ -2615,16 +2612,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ andi(r0, r4,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, if_true, if_false, fall_through, cr0);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(check, factory->type##_string())) { \
- __ JumpIfSmi(r3, if_false); \
- __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); \
- __ CompareRoot(r3, Heap::k##Type##MapRootIndex); \
- Split(eq, if_true, if_false, fall_through);
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
} else {
if (if_false != fall_through) __ b(if_false);
}
@@ -2665,6 +2652,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
SetExpressionPosition(expr);
PopOperand(r4);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+ RestoreContext();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
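
Several hunks add RestoreContext() immediately after calls emitted with RelocInfo::CODE_TARGET (FastNewStrictArguments, FastNewSloppyArguments, InstanceOf, FastCloneShallowArray). A plausible reading, sketched below, is that the callee may leave the dedicated context register pointing at its own context, so the caller reloads it from a fixed frame slot; the frame-slot mechanics here are assumptions, not lifted from this diff.

#include <cassert>

struct Frame { void* saved_context; };  // models the frame's context slot

void* context_register;  // models cp / esi / rsi on the various targets

void CallBuiltin(void* callee_context) {
  context_register = callee_context;  // the call may clobber the register
}

void RestoreContext(const Frame& frame) {
  context_register = frame.saved_context;  // reload from the frame slot
}

int main() {
  int caller_ctx = 0, builtin_ctx = 0;
  Frame frame{&caller_ctx};
  context_register = &caller_ctx;
  CallBuiltin(&builtin_ctx);  // without RestoreContext we would stay here
  RestoreContext(frame);
  assert(context_register == &caller_ctx);
}
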
diff --git a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
index f91d9e119b..340082affc 100644
--- a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
+++ b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
@@ -137,11 +137,11 @@ void FullCodeGenerator::Generate() {
// Increment invocation count for the function.
{
Comment cmnt(masm_, "[ Increment invocation count");
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ LoadP(r6, FieldMemOperand(r6, LiteralsArray::kFeedbackVectorOffset));
- __ LoadP(r1, FieldMemOperand(r6, FeedbackVector::kInvocationCountIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
+ __ LoadP(r1, FieldMemOperand(
+ r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
__ StoreP(r1, FieldMemOperand(
r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
@@ -282,14 +282,16 @@ void FullCodeGenerator::Generate() {
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- FastNewStrictArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(r3);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- FastNewSloppyArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
}
SetVar(arguments, r2, r3, r4);
@@ -697,10 +699,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
globals_->Add(isolate()->factory()->undefined_value(), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
}
case VariableLocation::PARAMETER:
@@ -736,9 +739,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+ // We need the slot where the literals array lives, too.
+ slot = declaration->fun()->LiteralFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -882,7 +891,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
SetStatementPosition(stmt, SKIP_BREAK);
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1056,7 +1065,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ LoadP(StoreDescriptor::ValueRegister(),
@@ -1066,7 +1075,7 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), r2);
__ LoadP(StoreDescriptor::ValueRegister(),
@@ -1139,10 +1148,10 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
__ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ LoadSmiLiteral(r4, SmiFromSlot(expr->literal_slot()));
__ mov(r3, Operand(constant_properties));
int flags = expr->ComputeFlags();
__ LoadSmiLiteral(r2, Smi::FromInt(flags));
@@ -1189,7 +1198,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(r2));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- CallStoreIC(property->GetSlot(0), key->value());
+ CallStoreIC(property->GetSlot(0), key->value(), true);
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1270,18 +1279,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<ConstantElementsPair> constant_elements =
expr->GetOrBuildConstantElements(isolate());
- bool has_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
__ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ LoadSmiLiteral(r4, SmiFromSlot(expr->literal_slot()));
__ mov(r3, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ LoadSmiLiteral(r2, Smi::FromInt(expr->ComputeFlags()));
@@ -1289,7 +1289,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
Callable callable =
- CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1645,8 +1645,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
context()->Plug(r2);
}
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -1699,7 +1698,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
@@ -1859,8 +1858,9 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
EmitProfilingCounterHandlingForReturnSequence(true);
}
Handle<Code> code =
- CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
- __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
+ CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+ .code();
+ __ Load(r5, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r2, Operand(arg_count));
CallIC(code);
@@ -2561,16 +2561,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ tm(FieldMemOperand(r2, Map::kBitFieldOffset),
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(check, factory->type##_string())) { \
- __ JumpIfSmi(r2, if_false); \
- __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); \
- __ CompareRoot(r2, Heap::k##Type##MapRootIndex); \
- Split(eq, if_true, if_false, fall_through);
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
} else {
if (if_false != fall_through) __ b(if_false);
}
@@ -2610,6 +2600,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
SetExpressionPosition(expr);
PopOperand(r3);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+ RestoreContext();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r2, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
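
Every VisitObjectLiteral hunk widens the named-property store from CallStoreIC(slot, name) to CallStoreIC(slot, name, true). The diff does not show what the third argument means; the sketch below models it as an own-property flag, which is purely an assumption for illustration, as is the stand-in function body.

#include <iostream>
#include <string>

// Hypothetical stand-in; the real CallStoreIC emits an IC call. The third
// parameter's name is guessed, only its position comes from the diff.
void CallStoreIC(int slot, const std::string& name, bool own_property = false) {
  std::cout << "store " << name << " via slot " << slot
            << (own_property ? " (own property)" : "") << "\n";
}

int main() {
  CallStoreIC(1, "home_object_symbol");  // unchanged two-argument call sites
  CallStoreIC(0, "key", true);           // object-literal property stores
}
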
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index da042fd243..d4d78edcf6 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -4,6 +4,7 @@
#if V8_TARGET_ARCH_X64
+#include "src/assembler-inl.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/builtins/builtins-constructor.h"
@@ -14,7 +15,9 @@
#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/heap/heap-inl.h"
#include "src/ic/ic.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -121,8 +124,8 @@ void FullCodeGenerator::Generate() {
// Increment invocation count for the function.
{
Comment cmnt(masm_, "[ Increment invocation count");
- __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ movp(rcx, FieldOperand(rcx, LiteralsArray::kFeedbackVectorOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
+ __ movp(rcx, FieldOperand(rcx, Cell::kValueOffset));
__ SmiAddConstant(
FieldOperand(rcx, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize),
@@ -260,14 +263,16 @@ void FullCodeGenerator::Generate() {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- FastNewStrictArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->FastNewStrictArguments(),
+ RelocInfo::CODE_TARGET);
+ RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(rdi);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- FastNewSloppyArgumentsStub stub(isolate());
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->FastNewSloppyArguments(),
+ RelocInfo::CODE_TARGET);
+ RestoreContext();
}
SetVar(arguments, rax, rbx, rdx);
@@ -718,10 +723,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
globals_->Add(isolate()->factory()->undefined_value(), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
}
case VariableLocation::PARAMETER:
@@ -758,9 +764,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+ // We need the slot where the literals array lives, too.
+ slot = declaration->fun()->LiteralFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -911,7 +923,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
SetStatementPosition(stmt, SKIP_BREAK);
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1074,9 +1086,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
decrement_loop_depth();
}
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
__ movp(StoreDescriptor::ValueRegister(),
@@ -1084,10 +1095,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ movp(StoreDescriptor::ReceiverRegister(), rax);
__ movp(StoreDescriptor::ValueRegister(),
@@ -1162,18 +1172,18 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
int flags = expr->ComputeFlags();
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(SmiFromSlot(expr->literal_slot()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Move(rbx, Smi::FromInt(expr->literal_index()));
+ __ Move(rbx, SmiFromSlot(expr->literal_slot()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
Callable callable = CodeFactory::FastCloneShallowObject(
@@ -1215,7 +1225,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(rax));
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
- CallStoreIC(property->GetSlot(0), key->value());
+ CallStoreIC(property->GetSlot(0), key->value(), true);
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1291,28 +1301,19 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<ConstantElementsPair> constant_elements =
expr->GetOrBuildConstantElements(isolate());
- bool has_constant_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(SmiFromSlot(expr->literal_slot()));
__ Push(constant_elements);
__ Push(Smi::FromInt(expr->ComputeFlags()));
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Move(rbx, Smi::FromInt(expr->literal_index()));
+ __ Move(rbx, SmiFromSlot(expr->literal_slot()));
__ Move(rcx, constant_elements);
Callable callable =
- CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1580,9 +1581,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
context()->Plug(rax);
}
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -1634,7 +1633,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
@@ -1793,8 +1792,9 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
EmitProfilingCounterHandlingForReturnSequence(true);
}
Handle<Code> code =
- CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
- __ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
+ CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+ .code();
+ __ Set(rdx, IntFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ Set(rax, arg_count);
CallIC(code);
@@ -2520,16 +2520,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(zero, if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(check, factory->type##_string())) { \
- __ JumpIfSmi(rax, if_false); \
- __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset)); \
- __ CompareRoot(rax, Heap::k##Type##MapRootIndex); \
- Split(equal, if_true, if_false, fall_through);
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -2570,6 +2560,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
SetExpressionPosition(expr);
PopOperand(rdx);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+ RestoreContext();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
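
The array-literal hunks in each file delete the DONT_TRACK_ALLOCATION_SITE fast path and pass TRACK_ALLOCATION_SITE unconditionally to CodeFactory::FastCloneShallowArray. Reconstructed as plain C++ straight from the deleted lines, the old and new mode selection compare as follows.

#include <cassert>

enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };

// The deleted branch: tracking was disabled for fast object elements
// whenever allocation-site pretenuring was off.
AllocationSiteMode OldModeFor(bool has_fast_elements, bool pretenuring) {
  if (has_fast_elements && !pretenuring) return DONT_TRACK_ALLOCATION_SITE;
  return TRACK_ALLOCATION_SITE;
}

// The replacement: the stub is now always called with tracking enabled.
AllocationSiteMode NewModeFor(bool /*has_fast_elements*/, bool /*pretenuring*/) {
  return TRACK_ALLOCATION_SITE;
}

int main() {
  assert(OldModeFor(true, false) == DONT_TRACK_ALLOCATION_SITE);
  assert(NewModeFor(true, false) == TRACK_ALLOCATION_SITE);
}
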
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index e9acb49983..25d3f216a8 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -121,12 +121,12 @@ void FullCodeGenerator::Generate() {
// Increment invocation count for the function.
{
Comment cmnt(masm_, "[ Increment invocation count");
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ mov(ecx, FieldOperand(ecx, LiteralsArray::kFeedbackVectorOffset));
- __ add(FieldOperand(
- ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
- Immediate(Smi::FromInt(1)));
+ __ mov(ecx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
+ __ mov(ecx, FieldOperand(ecx, Cell::kValueOffset));
+ __ add(
+ FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize),
+ Immediate(Smi::FromInt(1)));
}
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -702,10 +702,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
globals_->Add(isolate()->factory()->undefined_value(), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
}
case VariableLocation::PARAMETER:
@@ -741,9 +742,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
- FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ FeedbackSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+ // We need the slot where the literals array lives, too.
+ slot = declaration->fun()->LiteralFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -888,7 +895,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
SetStatementPosition(stmt, SKIP_BREAK);
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1040,19 +1047,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
decrement_loop_depth();
}
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
int offset,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), eax);
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
@@ -1124,7 +1129,7 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
int flags = expr->ComputeFlags();
// If any of the keys would store to the elements array, then we shouldn't
@@ -1179,7 +1184,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(eax));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- CallStoreIC(property->GetSlot(0), key->value());
+ CallStoreIC(property->GetSlot(0), key->value(), true);
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1256,15 +1261,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<ConstantElementsPair> constant_elements =
expr->GetOrBuildConstantElements(isolate());
- bool has_constant_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1277,7 +1273,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_elements));
Callable callable =
- CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1581,9 +1577,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
context()->Plug(eax);
}
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -1635,7 +1629,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
@@ -1800,7 +1794,8 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
EmitProfilingCounterHandlingForReturnSequence(true);
}
Handle<Code> code =
- CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
+ CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+ .code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Move(eax, Immediate(arg_count));
@@ -2528,16 +2523,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(zero, if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- } else if (String::Equals(check, factory->type##_string())) { \
- __ JumpIfSmi(eax, if_false); \
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), \
- isolate()->factory()->type##_map()); \
- Split(equal, if_true, if_false, fall_through);
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -2578,6 +2563,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
SetExpressionPosition(expr);
PopOperand(edx);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+ RestoreContext();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index 9374986cde..63ad213f8d 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -188,10 +188,9 @@ Object* FutexEmulation::Wait(Isolate* isolate,
return result;
}
-
Object* FutexEmulation::Wake(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
- int num_waiters_to_wake) {
+ uint32_t num_waiters_to_wake) {
DCHECK(addr < NumberToSize(array_buffer->byte_length()));
int waiters_woken = 0;
@@ -203,7 +202,9 @@ Object* FutexEmulation::Wake(Isolate* isolate,
if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
node->waiting_ = false;
node->cond_.NotifyOne();
- --num_waiters_to_wake;
+ if (num_waiters_to_wake != kWakeAll) {
+ --num_waiters_to_wake;
+ }
waiters_woken++;
}
diff --git a/deps/v8/src/futex-emulation.h b/deps/v8/src/futex-emulation.h
index a0e2b18bdc..801198fab8 100644
--- a/deps/v8/src/futex-emulation.h
+++ b/deps/v8/src/futex-emulation.h
@@ -13,7 +13,6 @@
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
-#include "src/handles.h"
// Support for emulating futexes, a low-level synchronization primitive. They
// are natively supported by Linux, but must be emulated for other platforms.
@@ -31,6 +30,8 @@ class TimeDelta;
namespace internal {
+template <typename T>
+class Handle;
class Isolate;
class JSArrayBuffer;
@@ -81,6 +82,9 @@ class FutexWaitList {
class FutexEmulation : public AllStatic {
public:
+ // Pass to Wake() to wake all waiters.
+ static const uint32_t kWakeAll = UINT32_MAX;
+
// Check that array_buffer[addr] == value, and return "not-equal" if not. If
// they are equal, block execution on |isolate|'s thread until woken via
// |Wake|, or when the time given in |rel_timeout_ms| elapses. Note that
@@ -91,10 +95,11 @@ class FutexEmulation : public AllStatic {
size_t addr, int32_t value, double rel_timeout_ms);
// Wake |num_waiters_to_wake| threads that are waiting on the given |addr|.
- // The rest of the waiters will continue to wait. The return value is the
- // number of woken waiters.
+ // |num_waiters_to_wake| can be kWakeAll, in which case all waiters are
+ // woken. The rest of the waiters will continue to wait. The return value is
+ // the number of woken waiters.
static Object* Wake(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
- size_t addr, int num_waiters_to_wake);
+ size_t addr, uint32_t num_waiters_to_wake);
// Return the number of threads waiting on |addr|. Should only be used for
// testing.
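A small standalone model (not V8 code; names are illustrative) may help show the new Wake() contract: with an unsigned count, kWakeAll is simply never decremented, so "wake everything" cannot underflow the way a signed countdown could.

#include <cstdint>
#include <vector>

static const uint32_t kWakeAll = UINT32_MAX;  // mirrors FutexEmulation::kWakeAll

// waiters[i] is true while waiter i is blocked; returns the number woken,
// following the same countdown rule as the Wake() loop above.
int WakeModel(std::vector<bool>& waiters, uint32_t num_waiters_to_wake) {
  int waiters_woken = 0;
  for (size_t i = 0; i < waiters.size() && num_waiters_to_wake > 0; ++i) {
    if (!waiters[i]) continue;
    waiters[i] = false;  // stands in for node->cond_.NotifyOne()
    if (num_waiters_to_wake != kWakeAll) --num_waiters_to_wake;
    ++waiters_woken;
  }
  return waiters_woken;
}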
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index aa6b4eec9b..a90e624ec5 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -339,6 +339,22 @@ const int kNoSourcePosition = -1;
// This constant is used to indicate missing deoptimization information.
const int kNoDeoptimizationId = -1;
+// Deoptimize bailout kind.
+enum class DeoptimizeKind : uint8_t { kEager, kSoft };
+inline size_t hash_value(DeoptimizeKind kind) {
+ return static_cast<size_t>(kind);
+}
+inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ return os << "Eager";
+ case DeoptimizeKind::kSoft:
+ return os << "Soft";
+ }
+ UNREACHABLE();
+ return os;
+}
+
// Mask for the sign bit in a smi.
const intptr_t kSmiSignMask = kIntptrSignBit;
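As a quick usage sketch for the DeoptimizeKind helpers above (a self-contained restatement for illustration, not additional V8 code), the stream overload prints the enumerator name and the cast used by hash_value yields its ordinal:

#include <cstdint>
#include <iostream>

enum class DeoptimizeKind : uint8_t { kEager, kSoft };
inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
  return os << (kind == DeoptimizeKind::kEager ? "Eager" : "Soft");
}

int main() {
  std::cout << DeoptimizeKind::kSoft << "\n";                        // Soft
  std::cout << static_cast<unsigned>(DeoptimizeKind::kEager) << "\n";  // 0
}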
@@ -354,10 +370,6 @@ const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
const intptr_t kDoubleAlignment = 8;
const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
-// Desired alignment for 128 bit SIMD values.
-const intptr_t kSimd128Alignment = 16;
-const intptr_t kSimd128AlignmentMask = kSimd128Alignment - 1;
-
// Desired alignment for generated code is 32 bytes (to improve cache line
// utilization).
const int kCodeAlignmentBits = 5;
@@ -500,12 +512,7 @@ enum AllocationSpace {
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
-enum AllocationAlignment {
- kWordAligned,
- kDoubleAligned,
- kDoubleUnaligned,
- kSimd128Unaligned
-};
+enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
// Possible outcomes for decisions.
enum class Decision : uint8_t { kUnknown, kTrue, kFalse };
@@ -1201,16 +1208,25 @@ inline bool IsConstructable(FunctionKind kind, LanguageMode mode) {
return true;
}
-enum class CallableType : unsigned { kJSFunction, kAny };
+enum class InterpreterPushArgsMode : unsigned {
+ kJSFunction,
+ kWithFinalSpread,
+ kOther
+};
-inline size_t hash_value(CallableType type) { return bit_cast<unsigned>(type); }
+inline size_t hash_value(InterpreterPushArgsMode mode) {
+ return bit_cast<unsigned>(mode);
+}
-inline std::ostream& operator<<(std::ostream& os, CallableType function_type) {
- switch (function_type) {
- case CallableType::kJSFunction:
+inline std::ostream& operator<<(std::ostream& os,
+ InterpreterPushArgsMode mode) {
+ switch (mode) {
+ case InterpreterPushArgsMode::kJSFunction:
return os << "JSFunction";
- case CallableType::kAny:
- return os << "Any";
+ case InterpreterPushArgsMode::kWithFinalSpread:
+ return os << "WithFinalSpread";
+ case InterpreterPushArgsMode::kOther:
+ return os << "Other";
}
UNREACHABLE();
return os;
@@ -1248,8 +1264,9 @@ class BinaryOperationFeedback {
// Type feedback is encoded in such a way that we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
-// kSignedSmall -> kNumber -> kAny
-// kInternalizedString -> kString -> kAny
+// kSignedSmall -> kNumber -> kAny
+// kInternalizedString -> kString -> kAny
+// kReceiver -> kAny
// TODO(epertoso): consider unifying this with BinaryOperationFeedback.
class CompareOperationFeedback {
public:
@@ -1260,24 +1277,11 @@ class CompareOperationFeedback {
kNumberOrOddball = 0x7,
kInternalizedString = 0x8,
kString = 0x18,
- kAny = 0x3F
+ kReceiver = 0x20,
+ kAny = 0x7F
};
};
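The OR-combining property described above can be checked directly; a minimal sketch using only the enumerators visible in this hunk (the lower lattice values are not shown here):

#include <cstdio>

enum Feedback : unsigned {
  kNumberOrOddball = 0x7,
  kInternalizedString = 0x8,
  kString = 0x18,  // includes the kInternalizedString bit
  kReceiver = 0x20,
  kAny = 0x7F,     // superset of every other state
};

int main() {
  // String feedback subsumes internalized-string feedback under OR.
  std::printf("%#x\n", kInternalizedString | kString);  // 0x18 == kString
  // Combining anything with kAny saturates at kAny.
  std::printf("%#x\n", kReceiver | kAny);               // 0x7f == kAny
}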
-// Describes how exactly a frame has been dropped from stack.
-enum LiveEditFrameDropMode {
- // No frame has been dropped.
- LIVE_EDIT_FRAMES_UNTOUCHED,
- // The top JS frame had been calling debug break slot stub. Patch the
- // address this stub jumps to in the end.
- LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
- // The top JS frame had been calling some C++ function. The return address
- // gets patched automatically.
- LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL,
- LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL,
- LIVE_EDIT_CURRENTLY_SET_MODE
-};
-
enum class UnicodeEncoding : uint8_t {
// Different unicode encodings in a |word32|:
UTF16, // hi 16bits -> trailing surrogate or 0, low 16bits -> lead surrogate
@@ -1325,6 +1329,18 @@ enum class DataPropertyInLiteralFlag {
typedef base::Flags<DataPropertyInLiteralFlag> DataPropertyInLiteralFlags;
DEFINE_OPERATORS_FOR_FLAGS(DataPropertyInLiteralFlags)
+enum ExternalArrayType {
+ kExternalInt8Array = 1,
+ kExternalUint8Array,
+ kExternalInt16Array,
+ kExternalUint16Array,
+ kExternalInt32Array,
+ kExternalUint32Array,
+ kExternalFloat32Array,
+ kExternalFloat64Array,
+ kExternalUint8ClampedArray,
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index cfaf4fb6eb..aefdc0ebc4 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -94,7 +94,6 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
return result;
}
-
Object** HandleScope::CreateHandle(Isolate* isolate, Object* value) {
DCHECK(AllowHandleAllocation::IsAllowed());
HandleScopeData* data = isolate->handle_scope_data();
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 3b1902e076..26e11b3b1a 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -124,7 +124,8 @@ CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
prev_canonical_scope_ = handle_scope_data->canonical_scope;
handle_scope_data->canonical_scope = this;
root_index_map_ = new RootIndexMap(isolate);
- identity_map_ = new IdentityMap<Object**>(isolate->heap(), &zone_);
+ identity_map_ = new IdentityMap<Object**, ZoneAllocationPolicy>(
+ isolate->heap(), ZoneAllocationPolicy(&zone_));
canonical_level_ = handle_scope_data->level;
}
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 8664a3ff89..416200b0d2 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -331,7 +331,7 @@ class HandleScope {
// Forward declarations for CanonicalHandleScope.
-template <typename V>
+template <typename V, class AllocationPolicy>
class IdentityMap;
class RootIndexMap;
@@ -352,7 +352,7 @@ class V8_EXPORT_PRIVATE CanonicalHandleScope final {
Isolate* isolate_;
Zone zone_;
RootIndexMap* root_index_map_;
- IdentityMap<Object**>* identity_map_;
+ IdentityMap<Object**, ZoneAllocationPolicy>* identity_map_;
// Ordinary nested handle scopes within the current one are not canonical.
int canonical_level_;
// We may have nested canonical scopes. Handles are canonical within each one.
@@ -361,8 +361,7 @@ class V8_EXPORT_PRIVATE CanonicalHandleScope final {
friend class HandleScope;
};
-
-class DeferredHandleScope final {
+class V8_EXPORT_PRIVATE DeferredHandleScope final {
public:
explicit DeferredHandleScope(Isolate* isolate);
// The DeferredHandles object returned stores the Handles created
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index cf8739167e..49285eec69 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -27,12 +27,6 @@
V(regexp_to_string, "[object RegExp]") \
V(string_to_string, "[object String]") \
V(bind_string, "bind") \
- V(bool16x8_string, "bool16x8") \
- V(Bool16x8_string, "Bool16x8") \
- V(bool32x4_string, "bool32x4") \
- V(Bool32x4_string, "Bool32x4") \
- V(bool8x16_string, "bool8x16") \
- V(Bool8x16_string, "Bool8x16") \
V(boolean_string, "boolean") \
V(Boolean_string, "Boolean") \
V(bound__string, "bound ") \
@@ -77,8 +71,6 @@
V(EvalError_string, "EvalError") \
V(false_string, "false") \
V(flags_string, "flags") \
- V(float32x4_string, "float32x4") \
- V(Float32x4_string, "Float32x4") \
V(function_string, "function") \
V(Function_string, "Function") \
V(Generator_string, "Generator") \
@@ -88,6 +80,7 @@
V(get_string, "get") \
V(get_space_string, "get ") \
V(global_string, "global") \
+ V(group_string, "group") \
V(has_string, "has") \
V(hour_string, "hour") \
V(ignoreCase_string, "ignoreCase") \
@@ -96,12 +89,6 @@
V(index_string, "index") \
V(infinity_string, "Infinity") \
V(input_string, "input") \
- V(int16x8_string, "int16x8") \
- V(Int16x8_string, "Int16x8") \
- V(int32x4_string, "int32x4") \
- V(Int32x4_string, "Int32x4") \
- V(int8x16_string, "int8x16") \
- V(Int8x16_string, "Int8x16") \
V(isExtensible_string, "isExtensible") \
V(isView_string, "isView") \
V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
@@ -184,12 +171,6 @@
V(CompileError_string, "CompileError") \
V(LinkError_string, "LinkError") \
V(RuntimeError_string, "RuntimeError") \
- V(uint16x8_string, "uint16x8") \
- V(Uint16x8_string, "Uint16x8") \
- V(uint32x4_string, "uint32x4") \
- V(Uint32x4_string, "Uint32x4") \
- V(uint8x16_string, "uint8x16") \
- V(Uint8x16_string, "Uint8x16") \
V(undefined_string, "undefined") \
V(undefined_to_string, "[object Undefined]") \
V(unicode_string, "unicode") \
@@ -206,51 +187,54 @@
V(writable_string, "writable") \
V(year_string, "year")
-#define PRIVATE_SYMBOL_LIST(V) \
- V(array_iteration_kind_symbol) \
- V(array_iterator_next_symbol) \
- V(array_iterator_object_symbol) \
- V(call_site_frame_array_symbol) \
- V(call_site_frame_index_symbol) \
- V(class_end_position_symbol) \
- V(class_start_position_symbol) \
- V(detailed_stack_trace_symbol) \
- V(elements_transition_symbol) \
- V(error_end_pos_symbol) \
- V(error_script_symbol) \
- V(error_start_pos_symbol) \
- V(frozen_symbol) \
- V(hash_code_symbol) \
- V(home_object_symbol) \
- V(intl_initialized_marker_symbol) \
- V(intl_pattern_symbol) \
- V(intl_resolved_symbol) \
- V(megamorphic_symbol) \
- V(native_context_index_symbol) \
- V(nonexistent_symbol) \
- V(nonextensible_symbol) \
- V(normal_ic_symbol) \
- V(not_mapped_symbol) \
- V(premonomorphic_symbol) \
- V(promise_async_stack_id_symbol) \
- V(promise_debug_marker_symbol) \
- V(promise_forwarding_handler_symbol) \
- V(promise_handled_by_symbol) \
- V(promise_async_id_symbol) \
- V(sealed_symbol) \
- V(stack_trace_symbol) \
- V(strict_function_transition_symbol) \
+#define PRIVATE_SYMBOL_LIST(V) \
+ V(array_iteration_kind_symbol) \
+ V(array_iterator_next_symbol) \
+ V(array_iterator_object_symbol) \
+ V(call_site_frame_array_symbol) \
+ V(call_site_frame_index_symbol) \
+ V(class_end_position_symbol) \
+ V(class_start_position_symbol) \
+ V(detailed_stack_trace_symbol) \
+ V(elements_transition_symbol) \
+ V(error_end_pos_symbol) \
+ V(error_script_symbol) \
+ V(error_start_pos_symbol) \
+ V(frozen_symbol) \
+ V(hash_code_symbol) \
+ V(home_object_symbol) \
+ V(intl_initialized_marker_symbol) \
+ V(intl_pattern_symbol) \
+ V(intl_resolved_symbol) \
+ V(megamorphic_symbol) \
+ V(native_context_index_symbol) \
+ V(nonexistent_symbol) \
+ V(nonextensible_symbol) \
+ V(normal_ic_symbol) \
+ V(not_mapped_symbol) \
+ V(premonomorphic_symbol) \
+ V(promise_async_stack_id_symbol) \
+ V(promise_debug_marker_symbol) \
+ V(promise_forwarding_handler_symbol) \
+ V(promise_handled_by_symbol) \
+ V(promise_async_id_symbol) \
+ V(promise_default_resolve_handler_symbol) \
+ V(promise_default_reject_handler_symbol) \
+ V(sealed_symbol) \
+ V(stack_trace_symbol) \
+ V(strict_function_transition_symbol) \
V(uninitialized_symbol)
-#define PUBLIC_SYMBOL_LIST(V) \
- V(iterator_symbol, Symbol.iterator) \
- V(intl_fallback_symbol, IntlFallback) \
- V(match_symbol, Symbol.match) \
- V(replace_symbol, Symbol.replace) \
- V(search_symbol, Symbol.search) \
- V(species_symbol, Symbol.species) \
- V(split_symbol, Symbol.split) \
- V(to_primitive_symbol, Symbol.toPrimitive) \
+#define PUBLIC_SYMBOL_LIST(V) \
+ V(async_iterator_symbol, Symbol.asyncIterator) \
+ V(iterator_symbol, Symbol.iterator) \
+ V(intl_fallback_symbol, IntlFallback) \
+ V(match_symbol, Symbol.match) \
+ V(replace_symbol, Symbol.replace) \
+ V(search_symbol, Symbol.search) \
+ V(species_symbol, Symbol.species) \
+ V(split_symbol, Symbol.split) \
+ V(to_primitive_symbol, Symbol.toPrimitive) \
V(unscopables_symbol, Symbol.unscopables)
// Well-Known Symbols are "Public" symbols, which have a bit set which causes
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index f5bdead89a..d20f128002 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -17,7 +17,7 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
size_t length = NumberToSize(buffer->byte_length());
Page* page = Page::FromAddress(buffer->address());
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::LockGuard<base::RecursiveMutex> guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) {
page->AllocateLocalTracker();
@@ -39,7 +39,7 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
Page* page = Page::FromAddress(buffer->address());
size_t length = 0;
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::LockGuard<base::RecursiveMutex> guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
length = tracker->Remove(buffer);
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index def84572b6..543d81d23d 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -19,8 +19,7 @@ void LocalArrayBufferTracker::Free() {
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
- if ((free_mode == kFreeAll) ||
- Marking::IsWhite(ObjectMarking::MarkBitFrom(buffer))) {
+ if ((free_mode == kFreeAll) || ObjectMarking::IsWhite(buffer)) {
const size_t len = it->second;
heap_->isolate()->array_buffer_allocator()->Free(buffer->backing_store(),
len);
@@ -128,7 +127,7 @@ bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
Page* page = Page::FromAddress(buffer->address());
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::LockGuard<base::RecursiveMutex> guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return false;
return tracker->IsTracked(buffer);
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index cf881c473b..2c1024f09a 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -140,6 +140,13 @@ void GCTracer::ResetForTesting() {
start_counter_ = 0;
}
+void GCTracer::NotifyYoungGenerationHandling(
+ YoungGenerationHandling young_generation_handling) {
+ DCHECK(current_.type == Event::SCAVENGER || start_counter_ > 1);
+ heap_->isolate()->counters()->young_generation_handling()->AddSample(
+ static_cast<int>(young_generation_handling));
+}
+
void GCTracer::Start(GarbageCollector collector,
GarbageCollectionReason gc_reason,
const char* collector_reason) {
@@ -445,6 +452,7 @@ void GCTracer::PrintNVP() const {
"gc=%s "
"reduce_memory=%d "
"scavenge=%.2f "
+ "evacuate=%.2f "
"old_new=%.2f "
"weak=%.2f "
"roots=%.2f "
@@ -481,6 +489,7 @@ void GCTracer::PrintNVP() const {
"context_disposal_rate=%.1f\n",
duration, spent_in_mutator, current_.TypeName(true),
current_.reduce_memory, current_.scopes[Scope::SCAVENGER_SCAVENGE],
+ current_.scopes[Scope::SCAVENGER_EVACUATE],
current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
current_.scopes[Scope::SCAVENGER_WEAK],
current_.scopes[Scope::SCAVENGER_ROOTS],
@@ -541,6 +550,9 @@ void GCTracer::PrintNVP() const {
"evacuate.candidates=%.1f "
"evacuate.clean_up=%.1f "
"evacuate.copy=%.1f "
+ "evacuate.prologue=%.1f "
+ "evacuate.epilogue=%.1f "
+ "evacuate.rebalance=%.1f "
"evacuate.update_pointers=%.1f "
"evacuate.update_pointers.to_evacuated=%.1f "
"evacuate.update_pointers.to_new=%.1f "
@@ -624,6 +636,9 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
current_.scopes[Scope::MC_EVACUATE_COPY],
+ current_.scopes[Scope::MC_EVACUATE_PROLOGUE],
+ current_.scopes[Scope::MC_EVACUATE_EPILOGUE],
+ current_.scopes[Scope::MC_EVACUATE_REBALANCE],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 7aff1cf59d..b206286168 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -56,6 +56,9 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
F(MC_EVACUATE_COPY) \
+ F(MC_EVACUATE_EPILOGUE) \
+ F(MC_EVACUATE_PROLOGUE) \
+ F(MC_EVACUATE_REBALANCE) \
F(MC_EVACUATE_UPDATE_POINTERS) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
@@ -86,6 +89,7 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MINOR_MC_MARK_ROOTS) \
F(MINOR_MC_MARK_WEAK) \
F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
+ F(SCAVENGER_EVACUATE) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
F(SCAVENGER_SCAVENGE) \
@@ -234,6 +238,9 @@ class V8_EXPORT_PRIVATE GCTracer {
// Stop collecting data and print results.
void Stop(GarbageCollector collector);
+ void NotifyYoungGenerationHandling(
+ YoungGenerationHandling young_generation_handling);
+
// Sample and accumulate bytes allocated since the last GC.
void SampleAllocation(double current_ms, size_t new_space_counter_bytes,
size_t old_generation_counter_bytes);
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index b8f3498b42..9cf0475e43 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -225,6 +225,8 @@ AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
AllocationResult Heap::AllocateOneByteInternalizedString(
Vector<const uint8_t> str, uint32_t hash_field) {
CHECK_GE(String::kMaxLength, str.length());
+ // The canonical empty_string is the only zero-length string we allow.
+ DCHECK_IMPLIES(str.length() == 0, roots_[kempty_stringRootIndex] == nullptr);
// Compute map and object size.
Map* map = one_byte_internalized_string_map();
int size = SeqOneByteString::SizeFor(str.length());
@@ -256,6 +258,7 @@ AllocationResult Heap::AllocateOneByteInternalizedString(
AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
uint32_t hash_field) {
CHECK_GE(String::kMaxLength, str.length());
+ DCHECK_NE(0, str.length()); // Use Heap::empty_string() instead.
// Compute map and object size.
Map* map = internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
@@ -689,6 +692,10 @@ Isolate* Heap::isolate() {
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}
+void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
+ old_space_strings_.AddAll(new_space_strings_);
+ new_space_strings_.Clear();
+}
void Heap::ExternalStringTable::AddString(String* string) {
DCHECK(string->IsExternalString());
@@ -785,9 +792,14 @@ void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}
-void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
- DCHECK(construct_stub_deopt_pc_offset() == Smi::kZero);
- set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
+ DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
+ set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
+ DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
+ set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
@@ -850,6 +862,8 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
HeapObject* object = HeapObject::cast(*current);
CHECK(object->GetIsolate()->heap()->Contains(object));
CHECK(object->map()->IsMap());
+ } else {
+ CHECK((*current)->IsSmi());
}
}
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index e84f56e076..25cb56d20c 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/api.h"
+#include "src/assembler-inl.h"
#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
#include "src/base/once.h"
@@ -117,7 +118,6 @@ Heap::Heap()
#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
inline_allocation_disabled_(false),
- total_regexp_code_generated_(0),
tracer_(nullptr),
promoted_objects_size_(0),
promotion_ratio_(0),
@@ -141,8 +141,6 @@ Heap::Heap()
dead_object_stats_(nullptr),
scavenge_job_(nullptr),
idle_scavenge_observer_(nullptr),
- full_codegen_bytes_generated_(0),
- crankshaft_codegen_bytes_generated_(0),
new_space_allocation_counter_(0),
old_generation_allocation_counter_at_last_gc_(0),
old_generation_size_at_last_gc_(0),
@@ -160,8 +158,10 @@ Heap::Heap()
strong_roots_list_(NULL),
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
+ fast_promotion_mode_(false),
force_oom_(false),
- delay_sweeper_tasks_for_testing_(false) {
+ delay_sweeper_tasks_for_testing_(false),
+ pending_layout_change_object_(nullptr) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -990,24 +990,6 @@ bool Heap::CollectGarbage(GarbageCollector collector,
}
}
- if (collector == MARK_COMPACTOR && FLAG_incremental_marking &&
- !ShouldFinalizeIncrementalMarking() && !ShouldAbortIncrementalMarking() &&
- !incremental_marking()->IsStopped() &&
- !incremental_marking()->should_hurry() &&
- !incremental_marking()->NeedsFinalization() &&
- !IsCloseToOutOfMemory(new_space_->Capacity())) {
- if (!incremental_marking()->IsComplete() &&
- !mark_compact_collector()->marking_deque()->IsEmpty() &&
- !FLAG_gc_global) {
- if (FLAG_trace_incremental_marking) {
- isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Delaying MarkSweep.\n");
- }
- collector = YoungGenerationCollector();
- collector_reason = "incremental marking delaying mark-sweep";
- }
- }
-
bool next_gc_likely_to_collect_more = false;
size_t committed_memory_before = 0;
@@ -1360,7 +1342,17 @@ bool Heap::PerformGarbageCollection(
MinorMarkCompact();
break;
case SCAVENGER:
- Scavenge();
+ if (fast_promotion_mode_ &&
+ CanExpandOldGeneration(new_space()->Size())) {
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kFastPromotionDuringScavenge);
+ EvacuateYoungGeneration();
+ } else {
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kRegularScavenge);
+
+ Scavenge();
+ }
break;
}
@@ -1370,6 +1362,10 @@ bool Heap::PerformGarbageCollection(
UpdateSurvivalStatistics(start_new_space_size);
ConfigureInitialOldGenerationSize();
+ if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
+ ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_);
+ }
+
isolate_->counters()->objs_since_last_young()->Set(0);
gc_post_processing_depth_++;
@@ -1607,6 +1603,44 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
Heap* heap_;
};
+void Heap::EvacuateYoungGeneration() {
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE);
+ DCHECK(fast_promotion_mode_);
+ DCHECK(CanExpandOldGeneration(new_space()->Size()));
+
+ mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+
+ SetGCState(SCAVENGE);
+ LOG(isolate_, ResourceEvent("scavenge", "begin"));
+
+ // Move pages from new->old generation.
+ PageRange range(new_space()->bottom(), new_space()->top());
+ for (auto it = range.begin(); it != range.end();) {
+ Page* p = (*++it)->prev_page();
+ p->Unlink();
+ Page::ConvertNewToOld(p);
+ if (incremental_marking()->IsMarking())
+ mark_compact_collector()->RecordLiveSlotsOnPage(p);
+ }
+
+ // Reset new space.
+ if (!new_space()->Rebalance()) {
+ FatalProcessOutOfMemory("NewSpace::Rebalance");
+ }
+ new_space()->ResetAllocationInfo();
+ new_space()->set_age_mark(new_space()->top());
+
+ // Fix up special trackers.
+ external_string_table_.PromoteAllNewSpaceStrings();
+  // GlobalHandles are updated in PostGarbageCollectionProcessing.

+
+ IncrementYoungSurvivorsCounter(new_space()->Size());
+ IncrementPromotedObjectsSize(new_space()->Size());
+ IncrementSemiSpaceCopiedObjectSize(0);
+
+ LOG(isolate_, ResourceEvent("scavenge", "end"));
+ SetGCState(NOT_IN_GC);
+}
void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
@@ -1671,12 +1705,14 @@ void Heap::Scavenge() {
{
// Copy objects reachable from the old generation.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
- RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
- return Scavenger::CheckAndScavengeObject(this, addr);
- });
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ this, SYNCHRONIZED, [this](Address addr) {
+ return Scavenger::CheckAndScavengeObject(this, addr);
+ });
RememberedSet<OLD_TO_NEW>::IterateTyped(
- this, [this](SlotType type, Address host_addr, Address addr) {
+ this, SYNCHRONIZED,
+ [this](SlotType type, Address host_addr, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
isolate(), type, addr, [this](Object** addr) {
// We expect that objects referenced by code are long living.
@@ -1748,6 +1784,19 @@ void Heap::Scavenge() {
SetGCState(NOT_IN_GC);
}
+void Heap::ComputeFastPromotionMode(double survival_rate) {
+ const size_t survived_in_new_space =
+ survived_last_scavenge_ * 100 / new_space_->Capacity();
+ fast_promotion_mode_ =
+ !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
+ !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
+ survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(
+ isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
+ fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
+ }
+}
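A hedged model of that decision (the FLAG checks are folded into booleans; the actual value of kMinPromotedPercentForFastPromotionMode is not shown in this hunk, so 90 below is an assumed placeholder):

#include <cstddef>
#include <cstdio>

static const size_t kMinPromotedPercentForFastPromotionMode = 90;  // assumed

bool ShouldFastPromote(size_t survived_last_scavenge, size_t capacity,
                       bool optimize_for_size, bool at_max_capacity) {
  size_t survived_percent = survived_last_scavenge * 100 / capacity;
  return !optimize_for_size && at_max_capacity &&
         survived_percent >= kMinPromotedPercentForFastPromotionMode;
}

int main() {
  // 15872 KB of a 16384 KB new space survived: 96% >= 90%, so fast-promote.
  std::printf("%d\n", ShouldFastPromote(15872, 16384, false, true));  // 1
}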
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
@@ -1755,12 +1804,21 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
- heap->FinalizeExternalString(String::cast(*p));
+ String* string = String::cast(*p);
+ if (!string->IsExternalString()) {
+ // Original external string has been internalized.
+ DCHECK(string->IsThinString());
+ return NULL;
+ }
+ heap->FinalizeExternalString(string);
return NULL;
}
// String is still reachable.
- return String::cast(first_word.ToForwardingAddress());
+ String* string = String::cast(first_word.ToForwardingAddress());
+ if (string->IsThinString()) string = ThinString::cast(string)->actual();
+ // Internalization can replace external strings with non-external strings.
+ return string->IsExternalString() ? string : nullptr;
}
@@ -1964,8 +2022,6 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
case kDoubleAligned:
case kDoubleUnaligned:
return kDoubleSize - kPointerSize;
- case kSimd128Unaligned:
- return kSimd128Size - kPointerSize;
default:
UNREACHABLE();
}
@@ -1979,10 +2035,6 @@ int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
return kPointerSize;
if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
return kDoubleSize - kPointerSize; // No fill if double is always aligned.
- if (alignment == kSimd128Unaligned) {
- return (kSimd128Size - (static_cast<int>(offset) + kPointerSize)) &
- kSimd128AlignmentMask;
- }
return 0;
}
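With the SIMD cases gone, the fill computation reduces to the double-alignment rules; a standalone sketch with illustrative 32-bit constants:

#include <cstdint>
#include <cstdio>

const intptr_t kDoubleAlignment = 8;
const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
const int kPointerSize = 4;  // illustrative 32-bit values
const int kDoubleSize = 8;

enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

int GetFillToAlignModel(uintptr_t address, AllocationAlignment alignment) {
  intptr_t offset = address & kDoubleAlignmentMask;
  if (alignment == kDoubleAligned && offset != 0) return kPointerSize;
  if (alignment == kDoubleUnaligned && offset == 0)
    return kDoubleSize - kPointerSize;
  return 0;  // kWordAligned, or already at the requested alignment
}

int main() {
  std::printf("%d\n", GetFillToAlignModel(0x1004, kDoubleAligned));    // 4
  std::printf("%d\n", GetFillToAlignModel(0x1000, kDoubleUnaligned));  // 4
}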
@@ -2276,11 +2328,6 @@ bool Heap::CreateInitialMaps() {
mutable_heap_number)
ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
Context::SYMBOL_FUNCTION_INDEX)
-#define ALLOCATE_SIMD128_MAP(TYPE, Type, type, lane_count, lane_type) \
- ALLOCATE_PRIMITIVE_MAP(SIMD128_VALUE_TYPE, Type::kSize, type, \
- Context::TYPE##_FUNCTION_INDEX)
- SIMD128_TYPES(ALLOCATE_SIMD128_MAP)
-#undef ALLOCATE_SIMD128_MAP
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
@@ -2339,6 +2386,9 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
@@ -2423,8 +2473,7 @@ bool Heap::CreateInitialMaps() {
return true;
}
-
-AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
+AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
@@ -2441,36 +2490,9 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
HeapObject::cast(result)->set_map_no_write_barrier(map);
- HeapNumber::cast(result)->set_value(value);
return result;
}
-#define SIMD_ALLOCATE_DEFINITION(TYPE, Type, type, lane_count, lane_type) \
- AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count], \
- PretenureFlag pretenure) { \
- int size = Type::kSize; \
- STATIC_ASSERT(Type::kSize <= kMaxRegularHeapObjectSize); \
- \
- AllocationSpace space = SelectSpace(pretenure); \
- \
- HeapObject* result = nullptr; \
- { \
- AllocationResult allocation = \
- AllocateRaw(size, space, kSimd128Unaligned); \
- if (!allocation.To(&result)) return allocation; \
- } \
- \
- result->set_map_no_write_barrier(type##_map()); \
- Type* instance = Type::cast(result); \
- for (int i = 0; i < lane_count; i++) { \
- instance->set_lane(i, lanes[i]); \
- } \
- return result; \
- }
-SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
-#undef SIMD_ALLOCATE_DEFINITION
-
-
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
@@ -2608,8 +2630,8 @@ void Heap::CreateInitialObjects() {
set_nan_value(*factory->NewHeapNumber(
std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
- set_hole_nan_value(*factory->NewHeapNumber(bit_cast<double>(kHoleNanInt64),
- IMMUTABLE, TENURED));
+ set_hole_nan_value(
+ *factory->NewHeapNumberFromBits(kHoleNanInt64, IMMUTABLE, TENURED));
set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
set_minus_infinity_value(
*factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
@@ -2764,61 +2786,6 @@ void Heap::CreateInitialObjects() {
set_microtask_queue(empty_fixed_array());
{
- StaticFeedbackVectorSpec spec;
- FeedbackVectorSlot slot = spec.AddLoadICSlot();
- DCHECK_EQ(slot, FeedbackVectorSlot(FeedbackVector::kDummyLoadICSlot));
-
- slot = spec.AddKeyedLoadICSlot();
- DCHECK_EQ(slot,
- FeedbackVectorSlot(FeedbackVector::kDummyKeyedLoadICSlot));
-
- slot = spec.AddStoreICSlot();
- DCHECK_EQ(slot, FeedbackVectorSlot(FeedbackVector::kDummyStoreICSlot));
-
- slot = spec.AddKeyedStoreICSlot();
- DCHECK_EQ(slot,
- FeedbackVectorSlot(FeedbackVector::kDummyKeyedStoreICSlot));
-
- Handle<FeedbackMetadata> dummy_metadata =
- FeedbackMetadata::New(isolate(), &spec);
- Handle<FeedbackVector> dummy_vector =
- FeedbackVector::New(isolate(), dummy_metadata);
-
- set_dummy_vector(*dummy_vector);
-
- // Now initialize dummy vector's entries.
- LoadICNexus(isolate()).ConfigureMegamorphic();
- StoreICNexus(isolate()).ConfigureMegamorphic();
- KeyedLoadICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
- KeyedStoreICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
- }
-
- {
- // Create a canonical empty FeedbackVector, which is shared by all
- // functions that don't need actual type feedback slots. Note however
- // that all these functions will share the same invocation count, but
- // that shouldn't matter since we only use the invocation count to
- // relativize the absolute call counts, but we can only have call counts
- // if we have actual feedback slots.
- Handle<FixedArray> empty_feedback_vector = factory->NewFixedArray(
- FeedbackVector::kReservedIndexCount, TENURED);
- empty_feedback_vector->set(FeedbackVector::kMetadataIndex,
- empty_fixed_array());
- empty_feedback_vector->set(FeedbackVector::kInvocationCountIndex,
- Smi::kZero);
- empty_feedback_vector->set_map(feedback_vector_map());
- set_empty_feedback_vector(*empty_feedback_vector);
-
- // We use a canonical empty LiteralsArray for all functions that neither
- // have literals nor need a FeedbackVector (besides the invocation
- // count special slot).
- Handle<FixedArray> empty_literals_array =
- factory->NewFixedArray(1, TENURED);
- empty_literals_array->set(0, *empty_feedback_vector);
- set_empty_literals_array(*empty_literals_array);
- }
-
- {
Handle<FixedArray> empty_sloppy_arguments_elements =
factory->NewFixedArray(2, TENURED);
empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
@@ -2842,6 +2809,8 @@ void Heap::CreateInitialObjects() {
ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
weak_new_space_object_to_code_list()->SetLength(0);
+ set_code_coverage_list(undefined_value());
+
set_script_list(Smi::kZero);
Handle<SeededNumberDictionary> slow_element_dictionary =
@@ -2870,7 +2839,7 @@ void Heap::CreateInitialObjects() {
cell = factory->NewPropertyCell();
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_has_instance_protector(*cell);
+ set_array_iterator_protector(*cell);
Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
@@ -2888,10 +2857,6 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_fast_array_iteration_protector(*fast_array_iteration_cell);
- Handle<Cell> array_iterator_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_array_iterator_protector(*array_iterator_cell);
-
cell = factory->NewPropertyCell();
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
@@ -2964,6 +2929,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kWeakObjectToCodeTableRootIndex:
case kWeakNewSpaceObjectToCodeListRootIndex:
case kRetainedMapsRootIndex:
+ case kCodeCoverageListRootIndex:
case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
case kSerializedTemplatesRootIndex:
@@ -2984,7 +2950,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
}
}
-
bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
return !RootCanBeWrittenAfterInitialization(root_index) &&
!InNewSpace(root(root_index));
@@ -3191,7 +3156,7 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by) {
lo_space()->AdjustLiveBytes(by);
} else if (!in_heap_iterator() &&
!mark_compact_collector()->sweeping_in_progress() &&
- Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
+ ObjectMarking::IsBlack(object)) {
DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
MemoryChunk::IncrementLiveBytes(object, by);
}
@@ -3201,6 +3166,7 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by) {
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
CHECK_NOT_NULL(object);
+ DCHECK(CanMoveObjectStart(object));
DCHECK(!object->IsFixedTypedArrayBase());
DCHECK(!object->IsByteArray());
const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
@@ -3251,7 +3217,6 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
// object does not require synchronization.
- DCHECK(CanMoveObjectStart(object));
Object** former_start = HeapObject::RawField(object, 0);
int new_start_index = elements_to_trim * (element_size / kPointerSize);
former_start[new_start_index] = map;
@@ -3322,7 +3287,7 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
if (incremental_marking()->black_allocation() &&
- Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(filler))) {
+ ObjectMarking::IsBlackOrGrey(filler)) {
Page* page = Page::FromAddress(new_end);
page->markbits()->ClearRange(
page->AddressToMarkbitIndex(new_end),
@@ -4131,21 +4096,8 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
}
-bool Heap::IsHeapIterable() {
- // TODO(hpayer): This function is not correct. Allocation folding in old
- // space breaks the iterability.
- return new_space_top_after_last_gc_ == new_space()->top();
-}
-
-
void Heap::MakeHeapIterable() {
- DCHECK(AllowHeapAllocation::IsAllowed());
- if (!IsHeapIterable()) {
- CollectAllGarbage(kMakeHeapIterableMask,
- GarbageCollectionReason::kMakeHeapIterable);
- }
mark_compact_collector()->EnsureSweepingCompleted();
- DCHECK(IsHeapIterable());
}
@@ -4313,28 +4265,21 @@ void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
// for marking. We just have to execute the special visiting side effect
// code that adds objects to global data structures, e.g. for array buffers.
- // Code space, map space, and large object space do not use black pages.
- // Hence we have to color all objects of the reservation first black to avoid
- // unnecessary marking deque load.
if (incremental_marking()->black_allocation()) {
+ // Iterate black objects in old space, code space, map space, and large
+ // object space for side effects.
for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
while (addr < chunk.end) {
HeapObject* obj = HeapObject::FromAddress(addr);
- Marking::MarkBlack(ObjectMarking::MarkBitFrom(obj));
- addr += obj->Size();
- }
- }
- }
- for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
- const Heap::Reservation& res = reservations[i];
- for (auto& chunk : res) {
- Address addr = chunk.start;
- while (addr < chunk.end) {
- HeapObject* obj = HeapObject::FromAddress(addr);
- incremental_marking()->IterateBlackObject(obj);
+ // There might be grey objects due to black to grey transitions in
+ // incremental marking. E.g. see VisitNativeContextIncremental.
+ DCHECK(ObjectMarking::IsBlackOrGrey(obj));
+ if (ObjectMarking::IsBlack(obj)) {
+ incremental_marking()->IterateBlackObject(obj);
+ }
addr += obj->Size();
}
}
@@ -4342,6 +4287,29 @@ void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
}
}
+void Heap::NotifyObjectLayoutChange(HeapObject* object,
+ const DisallowHeapAllocation&) {
+ if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
+ incremental_marking()->MarkGrey(this, object);
+ }
+#ifdef VERIFY_HEAP
+ DCHECK(pending_layout_change_object_ == nullptr);
+ pending_layout_change_object_ = object;
+#endif
+}
+
+#ifdef VERIFY_HEAP
+void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
+ if (pending_layout_change_object_ == nullptr) {
+ DCHECK(!object->IsJSObject() ||
+ !object->map()->TransitionRequiresSynchronizationWithGC(new_map));
+ } else {
+ DCHECK_EQ(pending_layout_change_object_, object);
+ pending_layout_change_object_ = nullptr;
+ }
+}
+#endif
+
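The pending-object handshake added above can be modeled in isolation (illustrative names, not V8 API; the branch that inspects map transitions when nothing is pending is elided):

#include <cassert>
#include <cstdio>

struct HeapModel {
  const void* pending = nullptr;
  void NotifyObjectLayoutChange(const void* obj) {
    assert(pending == nullptr);  // no unfinished layout change outstanding
    pending = obj;
  }
  void VerifyObjectLayoutChange(const void* obj) {
    if (pending != nullptr) {
      assert(pending == obj);  // the map change matches the announced object
      pending = nullptr;
    }
  }
};

int main() {
  HeapModel heap;
  int object = 0;
  heap.NotifyObjectLayoutChange(&object);
  heap.VerifyObjectLayoutChange(&object);  // ok: same object, record cleared
  std::printf("ok\n");
}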
GCIdleTimeHeapState Heap::ComputeHeapState() {
GCIdleTimeHeapState heap_state;
heap_state.contexts_disposed = contexts_disposed_;
@@ -4907,8 +4875,7 @@ void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size,
// it would be a violation of the invariant to record its slots.
bool record_slots = false;
if (incremental_marking()->IsCompacting()) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(target);
- record_slots = Marking::IsBlack(mark_bit);
+ record_slots = ObjectMarking::IsBlack(target);
}
IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots);
@@ -5279,7 +5246,6 @@ const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
const double Heap::kConservativeHeapGrowingFactor = 1.3;
const double Heap::kTargetMutatorUtilization = 0.97;
-
// Given GC speed in bytes per ms, the allocation throughput in bytes per ms
// (mutator speed), this function returns the heap growing factor that will
// achieve the kTargetMutatorUtilisation if the GC speed and the mutator speed
@@ -5697,8 +5663,7 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
IncrementalMarking::MarkGrey(this, heap_object);
} else {
DCHECK(mark_compact_collector()->in_use());
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
- mark_compact_collector()->MarkObject(heap_object, mark_bit);
+ mark_compact_collector()->MarkObject(heap_object);
}
}
@@ -5711,22 +5676,6 @@ void Heap::TearDown() {
UpdateMaximumCommitted();
- if (FLAG_print_max_heap_committed) {
- PrintF("\n");
- PrintF("maximum_committed_by_heap=%" PRIuS " ", MaximumCommittedMemory());
- PrintF("maximum_committed_by_new_space=%" PRIuS " ",
- new_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_old_space=%" PRIuS " ",
- old_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_code_space=%" PRIuS " ",
- code_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_map_space=%" PRIuS " ",
- map_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_lo_space=%" PRIuS " ",
- lo_space_->MaximumCommittedMemory());
- PrintF("\n\n");
- }
-
if (FLAG_verify_predictable) {
PrintAlloctionsHash();
}
@@ -6146,8 +6095,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
bool SkipObject(HeapObject* object) {
if (object->IsFiller()) return true;
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
- return Marking::IsWhite(mark_bit);
+ return ObjectMarking::IsWhite(object);
}
private:
@@ -6159,6 +6107,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
+ // Use Marking instead of ObjectMarking to avoid adjusting live bytes
+ // counter.
MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
if (Marking::IsWhite(mark_bit)) {
Marking::WhiteToBlack(mark_bit);
@@ -6188,16 +6138,15 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
DisallowHeapAllocation no_allocation_;
};
-
HeapIterator::HeapIterator(Heap* heap,
HeapIterator::HeapObjectsFiltering filtering)
- : make_heap_iterable_helper_(heap),
- no_heap_allocation_(),
+ : no_heap_allocation_(),
heap_(heap),
filtering_(filtering),
filter_(nullptr),
space_iterator_(nullptr),
object_iterator_(nullptr) {
+ heap_->MakeHeapIterable();
heap_->heap_iterator_start();
// Start the iteration.
space_iterator_ = new SpaceIterator(heap_);
@@ -6257,194 +6206,6 @@ HeapObject* HeapIterator::NextObject() {
}
-#ifdef DEBUG
-
-Object* const PathTracer::kAnyGlobalObject = NULL;
-
-class PathTracer::MarkVisitor : public ObjectVisitor {
- public:
- explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
-
- void VisitPointers(Object** start, Object** end) override {
- // Scan all HeapObject pointers in [start, end)
- for (Object** p = start; !tracer_->found() && (p < end); p++) {
- if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
- }
- }
-
- private:
- PathTracer* tracer_;
-};
-
-
-class PathTracer::UnmarkVisitor : public ObjectVisitor {
- public:
- explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
-
- void VisitPointers(Object** start, Object** end) override {
- // Scan all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
- }
- }
-
- private:
- PathTracer* tracer_;
-};
-
-
-void PathTracer::VisitPointers(Object** start, Object** end) {
- bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
- // Visit all HeapObject pointers in [start, end)
- for (Object** p = start; !done && (p < end); p++) {
- if ((*p)->IsHeapObject()) {
- TracePathFrom(p);
- done = ((what_to_find_ == FIND_FIRST) && found_target_);
- }
- }
-}
-
-
-void PathTracer::Reset() {
- found_target_ = false;
- object_stack_.Clear();
-}
-
-
-void PathTracer::TracePathFrom(Object** root) {
- DCHECK((search_target_ == kAnyGlobalObject) ||
- search_target_->IsHeapObject());
- found_target_in_trace_ = false;
- Reset();
-
- MarkVisitor mark_visitor(this);
- MarkRecursively(root, &mark_visitor);
-
- UnmarkVisitor unmark_visitor(this);
- UnmarkRecursively(root, &unmark_visitor);
-
- ProcessResults();
-}
-
-
-static bool SafeIsNativeContext(HeapObject* obj) {
- return obj->map() == obj->GetHeap()->root(Heap::kNativeContextMapRootIndex);
-}
-
-
-void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- MapWord map_word = obj->map_word();
- if (!map_word.ToMap()->IsHeapObject()) return; // visited before
-
- if (found_target_in_trace_) return; // stop if target found
- object_stack_.Add(obj);
- if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
- (obj == search_target_)) {
- found_target_in_trace_ = true;
- found_target_ = true;
- return;
- }
-
- bool is_native_context = SafeIsNativeContext(obj);
-
- // not visited yet
- Map* map = Map::cast(map_word.ToMap());
-
- MapWord marked_map_word =
- MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
- obj->set_map_word(marked_map_word);
-
- // Scan the object body.
- if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
- // This is specialized to scan Context's properly.
- Object** start =
- reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
- Object** end =
- reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
- Context::FIRST_WEAK_SLOT * kPointerSize);
- mark_visitor->VisitPointers(start, end);
- } else {
- obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
- }
-
- // Scan the map after the body because the body is a lot more interesting
- // when doing leak detection.
- MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
-
- if (!found_target_in_trace_) { // don't pop if found the target
- object_stack_.RemoveLast();
- }
-}
-
-
-void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- MapWord map_word = obj->map_word();
- if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
-
- MapWord unmarked_map_word =
- MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
- obj->set_map_word(unmarked_map_word);
-
- Map* map = Map::cast(unmarked_map_word.ToMap());
-
- UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
-
- obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
-}
-
-
-void PathTracer::ProcessResults() {
- if (found_target_) {
- OFStream os(stdout);
- os << "=====================================\n"
- << "==== Path to object ====\n"
- << "=====================================\n\n";
-
- DCHECK(!object_stack_.is_empty());
- for (int i = 0; i < object_stack_.length(); i++) {
- if (i > 0) os << "\n |\n |\n V\n\n";
- object_stack_[i]->Print(os);
- }
- os << "=====================================\n";
- }
-}
-
-
-// Triggers a depth-first traversal of reachable objects from one
-// given root object and finds a path to a specific heap object and
-// prints it.
-void Heap::TracePathToObjectFrom(Object* target, Object* root) {
- PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
- tracer.VisitPointer(&root);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to a specific heap object and prints it.
-void Heap::TracePathToObject(Object* target) {
- PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
- IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to any global object and prints it. Useful for
-// determining the source for leaks of global objects.
-void Heap::TracePathToGlobal() {
- PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
- VISIT_ALL);
- IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-#endif
-
void Heap::UpdateTotalGCTime(double duration) {
if (FLAG_trace_gc_verbose) {
total_gc_time_ms_ += duration;
@@ -6455,14 +6216,19 @@ void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
int last = 0;
Isolate* isolate = heap_->isolate();
for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i]->IsTheHole(isolate)) {
+ Object* o = new_space_strings_[i];
+ if (o->IsTheHole(isolate)) {
continue;
}
- DCHECK(new_space_strings_[i]->IsExternalString());
- if (heap_->InNewSpace(new_space_strings_[i])) {
- new_space_strings_[last++] = new_space_strings_[i];
+ if (o->IsThinString()) {
+ o = ThinString::cast(o)->actual();
+ if (!o->IsExternalString()) continue;
+ }
+ DCHECK(o->IsExternalString());
+ if (heap_->InNewSpace(o)) {
+ new_space_strings_[last++] = o;
} else {
- old_space_strings_.Add(new_space_strings_[i]);
+ old_space_strings_.Add(o);
}
}
new_space_strings_.Rewind(last);
@@ -6474,12 +6240,17 @@ void Heap::ExternalStringTable::CleanUpAll() {
int last = 0;
Isolate* isolate = heap_->isolate();
for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i]->IsTheHole(isolate)) {
+ Object* o = old_space_strings_[i];
+ if (o->IsTheHole(isolate)) {
continue;
}
- DCHECK(old_space_strings_[i]->IsExternalString());
- DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
- old_space_strings_[last++] = old_space_strings_[i];
+ if (o->IsThinString()) {
+ o = ThinString::cast(o)->actual();
+ if (!o->IsExternalString()) continue;
+ }
+ DCHECK(o->IsExternalString());
+ DCHECK(!heap_->InNewSpace(o));
+ old_space_strings_[last++] = o;
}
old_space_strings_.Rewind(last);
old_space_strings_.Trim();
@@ -6492,11 +6263,21 @@ void Heap::ExternalStringTable::CleanUpAll() {
void Heap::ExternalStringTable::TearDown() {
for (int i = 0; i < new_space_strings_.length(); ++i) {
- heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
+ Object* o = new_space_strings_[i];
+ if (o->IsThinString()) {
+ o = ThinString::cast(o)->actual();
+ if (!o->IsExternalString()) continue;
+ }
+ heap_->FinalizeExternalString(ExternalString::cast(o));
}
new_space_strings_.Free();
for (int i = 0; i < old_space_strings_.length(); ++i) {
- heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
+ Object* o = old_space_strings_[i];
+ if (o->IsThinString()) {
+ o = ThinString::cast(o)->actual();
+ if (!o->IsExternalString()) continue;
+ }
+ heap_->FinalizeExternalString(ExternalString::cast(o));
}
old_space_strings_.Free();
}
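The ThinString unwrapping repeated across CleanUpNewSpaceStrings, CleanUpAll and TearDown follows one pattern, sketched here with a toy type (not V8's String hierarchy): unwrap a thin entry to its actual payload and keep it only if that payload is still external.

#include <cstdio>

struct Str {
  bool is_thin = false;
  bool is_external = false;
  Str* actual = nullptr;  // payload when is_thin
};

// Returns the entry the table should keep, or nullptr to drop it.
Str* FilterExternalEntry(Str* s) {
  if (s->is_thin) {
    s = s->actual;
    if (!s->is_external) return nullptr;  // internalized, no longer external
  }
  return s;
}

int main() {
  Str sequential{false, false, nullptr};  // plain, non-external string
  Str thin{true, false, &sequential};     // ThinString left in the table
  std::printf("%s\n", FilterExternalEntry(&thin) ? "keep" : "drop");  // drop
}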
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index e2f436a908..ad2623964f 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -58,16 +58,14 @@ using v8::MemoryPressureLevel;
V(Map, foreign_map, ForeignMap) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, transition_array_map, TransitionArrayMap) \
- V(FixedArray, empty_literals_array, EmptyLiteralsArray) \
- V(FixedArray, empty_feedback_vector, EmptyFeedbackVector) \
+ V(Map, feedback_vector_map, FeedbackVectorMap) \
+ V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
/* Entries beyond the first 32 */ \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
/* being compacted. */ \
- /* Empty scope info */ \
- V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
/* Oddballs */ \
V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Oddball, arguments_marker, ArgumentsMarker) \
@@ -95,12 +93,16 @@ using v8::MemoryPressureLevel;
V(Map, external_map, ExternalMap) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
V(Map, module_info_map, ModuleInfoMap) \
- V(Map, feedback_vector_map, FeedbackVectorMap) \
+ V(Map, no_closures_cell_map, NoClosuresCellMap) \
+ V(Map, one_closure_cell_map, OneClosureCellMap) \
+ V(Map, many_closures_cell_map, ManyClosuresCellMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \
V(Map, cons_string_map, ConsStringMap) \
+ V(Map, thin_one_byte_string_map, ThinOneByteStringMap) \
+ V(Map, thin_string_map, ThinStringMap) \
V(Map, sliced_string_map, SlicedStringMap) \
V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \
V(Map, external_string_map, ExternalStringMap) \
@@ -133,16 +135,6 @@ using v8::MemoryPressureLevel;
V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
- V(Map, float32x4_map, Float32x4Map) \
- V(Map, int32x4_map, Int32x4Map) \
- V(Map, uint32x4_map, Uint32x4Map) \
- V(Map, bool32x4_map, Bool32x4Map) \
- V(Map, int16x8_map, Int16x8Map) \
- V(Map, uint16x8_map, Uint16x8Map) \
- V(Map, bool16x8_map, Bool16x8Map) \
- V(Map, int8x16_map, Int8x16Map) \
- V(Map, uint8x16_map, Uint8x16Map) \
- V(Map, bool8x16_map, Bool8x16Map) \
/* Canonical empty values */ \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
@@ -160,17 +152,15 @@ using v8::MemoryPressureLevel;
V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
- V(FeedbackVector, dummy_vector, DummyVector) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
/* Protectors */ \
V(PropertyCell, array_protector, ArrayProtector) \
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
- V(PropertyCell, has_instance_protector, HasInstanceProtector) \
V(Cell, species_protector, SpeciesProtector) \
V(PropertyCell, string_length_protector, StringLengthProtector) \
V(Cell, fast_array_iteration_protector, FastArrayIterationProtector) \
- V(Cell, array_iterator_protector, ArrayIteratorProtector) \
+ V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
V(PropertyCell, array_buffer_neutering_protector, \
ArrayBufferNeuteringProtector) \
/* Special numbers */ \
@@ -210,6 +200,8 @@ using v8::MemoryPressureLevel;
/* slots refer to the code with the reference to the weak object. */ \
V(ArrayList, weak_new_space_object_to_code_list, \
WeakNewSpaceObjectToCodeList) \
+ /* List to hold onto feedback vectors that we need for code coverage */ \
+ V(Object, code_coverage_list, CodeCoverageList) \
V(Object, weak_stack_trace_list, WeakStackTraceList) \
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
V(FixedArray, serialized_templates, SerializedTemplates) \
@@ -245,7 +237,10 @@ using v8::MemoryPressureLevel;
/* function cache of the native context. */ \
V(Smi, next_template_serial_number, NextTemplateSerialNumber) \
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
+ V(Smi, construct_stub_create_deopt_pc_offset, \
+ ConstructStubCreateDeoptPCOffset) \
+ V(Smi, construct_stub_invoke_deopt_pc_offset, \
+ ConstructStubInvokeDeoptPCOffset) \
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
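
Each V(type, name, CamelName) row in these lists is an X-macro entry: heap.h expands the same list several times with different definitions of V to generate root accessors, root-index enumerators, and verification code, so adding or removing a row (ThinStringMap, the closure-cell maps, the SIMD maps) updates all of them at once. A simplified sketch of one such expansion (the macro body here is illustrative, not the exact one in heap.h):

// Illustrative expansion only; the real macros differ in detail.
#define ROOT_ACCESSOR(type, name, CamelName) \
  inline type* name() { return type::cast(roots_[k##CamelName##RootIndex]); }
ROOT_LIST(ROOT_ACCESSOR)  // one inline accessor per V(...) row
#undef ROOT_ACCESSOR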
@@ -276,16 +271,6 @@ using v8::MemoryPressureLevel;
V(MetaMap) \
V(HeapNumberMap) \
V(MutableHeapNumberMap) \
- V(Float32x4Map) \
- V(Int32x4Map) \
- V(Uint32x4Map) \
- V(Bool32x4Map) \
- V(Int16x8Map) \
- V(Uint16x8Map) \
- V(Bool16x8Map) \
- V(Int8x16Map) \
- V(Uint8x16Map) \
- V(Bool8x16Map) \
V(NativeContextMap) \
V(FixedArrayMap) \
V(CodeMap) \
@@ -319,6 +304,9 @@ using v8::MemoryPressureLevel;
V(ArgumentsMarkerMap) \
V(JSMessageObjectMap) \
V(ForeignMap) \
+ V(NoClosuresCellMap) \
+ V(OneClosureCellMap) \
+ V(ManyClosuresCellMap) \
V(NanValue) \
V(InfinityValue) \
V(MinusZeroValue) \
@@ -389,6 +377,17 @@ enum class GarbageCollectionReason {
// Also update src/tools/metrics/histograms/histograms.xml in chromium.
};
+enum class YoungGenerationHandling {
+ kRegularScavenge = 0,
+ kFastPromotionDuringScavenge = 1,
+ // Histogram::InspectConstructionArguments in chromium requires us to have at
+ // least three buckets.
+ kUnusedBucket = 2,
+ // If you add new items here, then update the young_generation_handling in
+ // counters.h.
+ // Also update src/tools/metrics/histograms/histograms.xml in chromium.
+};
+
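
As the comments say, this enum feeds a chromium histogram, so existing bucket values are frozen and new states may only be appended. A hedged sketch of a reporting site (the counters accessor is assumed from the comment above, not shown in this patch):

// Assumed reporting site; Histogram::AddSample takes the stable bucket.
isolate->counters()->young_generation_handling()->AddSample(
    static_cast<int>(YoungGenerationHandling::kRegularScavenge));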
// A queue of objects promoted during scavenge. Each object is accompanied by
// its size to avoid dereferencing a map pointer for scanning. The last page in
// to-space is used for the promotion queue. On conflict during scavenge, the
@@ -649,6 +648,8 @@ class Heap {
// The minimum size of a HeapObject on the heap.
static const int kMinObjectSizeInWords = 2;
+ static const int kMinPromotedPercentForFastPromotionMode = 90;
+
STATIC_ASSERT(kUndefinedValueRootIndex ==
Internals::kUndefinedValueRootIndex);
STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
@@ -768,9 +769,6 @@ class Heap {
// Converts the given boolean condition to JavaScript boolean value.
inline Oddball* ToBoolean(bool condition);
- // Check whether the heap is currently iterable.
- bool IsHeapIterable();
-
// Notify the heap that a context has been disposed.
int NotifyContextDisposed(bool dependant_context);
@@ -880,7 +878,8 @@ class Heap {
inline int NextScriptId();
inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
- inline void SetConstructStubDeoptPCOffset(int pc_offset);
+ inline void SetConstructStubCreateDeoptPCOffset(int pc_offset);
+ inline void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
inline void SetGetterStubDeoptPCOffset(int pc_offset);
inline void SetSetterStubDeoptPCOffset(int pc_offset);
inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
@@ -1239,6 +1238,20 @@ class Heap {
IncrementalMarking* incremental_marking() { return incremental_marking_; }
+ // The runtime uses this function to notify the GC of potentially unsafe
+ // object layout changes that require special synchronization with the
+ // concurrent marker. A layout change is unsafe if
+ // - it removes a tagged in-object field, or
+ // - it replaces a tagged in-object field with an untagged in-object field.
+ void NotifyObjectLayoutChange(HeapObject* object,
+ const DisallowHeapAllocation&);
+#ifdef VERIFY_HEAP
+ // This function checks that either
+ // - the map transition is safe, or
+ // - it was communicated to the GC using NotifyObjectLayoutChange.
+ void VerifyObjectLayoutChange(HeapObject* object, Map* new_map);
+#endif
+
// ===========================================================================
// Embedder heap tracer support. =============================================
// ===========================================================================
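
The const DisallowHeapAllocation& parameter is a compile-time witness that the caller holds an allocation-free scope across the shape change, so the concurrent marker cannot observe a half-updated object. A minimal caller sketch under that assumption (the surrounding code is hypothetical, not from this patch):

// Announce the unsafe shape change, then install the new map while
// allocation, and therefore GC, is impossible in this scope.
DisallowHeapAllocation no_allocation;
heap->NotifyObjectLayoutChange(object, no_allocation);
object->set_map(new_map);  // e.g. morphing a string into a ThinString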
@@ -1427,19 +1440,6 @@ class Heap {
// Returns the size of objects residing in non new spaces.
size_t PromotedSpaceSizeOfObjects();
- double total_regexp_code_generated() { return total_regexp_code_generated_; }
- void IncreaseTotalRegexpCodeGenerated(int size) {
- total_regexp_code_generated_ += size;
- }
-
- void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
- if (is_crankshafted) {
- crankshaft_codegen_bytes_generated_ += size;
- } else {
- full_codegen_bytes_generated_ += size;
- }
- }
-
// ===========================================================================
// Prologue/epilogue callback methods.========================================
// ===========================================================================
@@ -1514,10 +1514,6 @@ class Heap {
#ifdef DEBUG
void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
- void TracePathToObjectFrom(Object* target, Object* root);
- void TracePathToObject(Object* target);
- void TracePathToGlobal();
-
void Print();
void PrintHandles();
@@ -1543,6 +1539,7 @@ class Heap {
inline void IterateAll(ObjectVisitor* v);
inline void IterateNewSpaceStrings(ObjectVisitor* v);
+ inline void PromoteAllNewSpaceStrings();
// Restores internal invariant and gets rid of collected strings. Must be
// called after each Iterate*() that modified the strings.
@@ -1777,6 +1774,8 @@ class Heap {
void InvokeOutOfMemoryCallback();
+ void ComputeFastPromotionMode(double survival_rate);
+
// Attempt to over-approximate the weak closure by marking object groups and
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
@@ -1820,6 +1819,7 @@ class Heap {
// Performs a minor collection in new generation.
void Scavenge();
+ void EvacuateYoungGeneration();
Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
@@ -1900,7 +1900,7 @@ class Heap {
bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
- bool CanExpandOldGeneration(int size) {
+ bool CanExpandOldGeneration(size_t size) {
if (force_oom_) return false;
return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
}
@@ -1962,16 +1962,8 @@ class Heap {
AllocationSite* allocation_site = NULL);
// Allocates a HeapNumber from value.
- MUST_USE_RESULT AllocationResult
- AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
- PretenureFlag pretenure = NOT_TENURED);
-
-// Allocates SIMD values from the given lane values.
-#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
- AllocationResult Allocate##Type(lane_type lanes[lane_count], \
- PretenureFlag pretenure = NOT_TENURED);
- SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
-#undef SIMD_ALLOCATE_DECLARATION
+ MUST_USE_RESULT AllocationResult AllocateHeapNumber(
+ MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
// Allocates a byte array of the specified length
MUST_USE_RESULT AllocationResult
@@ -2136,10 +2128,6 @@ class Heap {
MUST_USE_RESULT AllocationResult
AllocateCode(int object_size, bool immovable);
- MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
-
- MUST_USE_RESULT AllocationResult InternalizeString(String* str);
-
// ===========================================================================
void set_force_oom(bool value) { force_oom_ = value; }
@@ -2264,9 +2252,6 @@ class Heap {
List<GCCallbackPair> gc_epilogue_callbacks_;
List<GCCallbackPair> gc_prologue_callbacks_;
- // Total RegExp code ever generated
- double total_regexp_code_generated_;
-
int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
GCTracer* tracer_;
@@ -2317,10 +2302,6 @@ class Heap {
AllocationObserver* idle_scavenge_observer_;
- // These two counters are monotomically increasing and never reset.
- size_t full_codegen_bytes_generated_;
- size_t crankshaft_codegen_bytes_generated_;
-
// This counter is increased before each GC and never reset.
// To account for the bytes allocated since the last GC, use the
// NewSpaceAllocationCounter() function.
@@ -2382,10 +2363,14 @@ class Heap {
LocalEmbedderHeapTracer* local_embedder_heap_tracer_;
+ bool fast_promotion_mode_;
+
// Used for testing purposes.
bool force_oom_;
bool delay_sweeper_tasks_for_testing_;
+ HeapObject* pending_layout_change_object_;
+
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class GCCallbacksScope;
@@ -2556,17 +2541,8 @@ class HeapIterator BASE_EMBEDDED {
HeapObject* next();
private:
- struct MakeHeapIterableHelper {
- explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
- };
-
HeapObject* NextObject();
- // The following two fields need to be declared in this order. Initialization
- // order guarantees that we first make the heap iterable (which may involve
- // allocations) and only then lock it down by not allowing further
- // allocations.
- MakeHeapIterableHelper make_heap_iterable_helper_;
DisallowHeapAllocation no_heap_allocation_;
Heap* heap_;
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index b0418686bf..9e8fdc726e 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -9,6 +9,7 @@
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
@@ -39,15 +40,12 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
- MarkBit value_bit = ObjectMarking::MarkBitFrom(value_heap_obj);
- DCHECK(!Marking::IsImpossible(value_bit));
+ DCHECK(!ObjectMarking::IsImpossible(value_heap_obj));
+ DCHECK(!ObjectMarking::IsImpossible(obj));
+ const bool is_black = ObjectMarking::IsBlack(obj);
- MarkBit obj_bit = ObjectMarking::MarkBitFrom(obj);
- DCHECK(!Marking::IsImpossible(obj_bit));
- bool is_black = Marking::IsBlack(obj_bit);
-
- if (is_black && Marking::IsWhite(value_bit)) {
- WhiteToGreyAndPush(value_heap_obj, value_bit);
+ if (is_black && ObjectMarking::IsWhite(value_heap_obj)) {
+ WhiteToGreyAndPush(value_heap_obj);
RestartIfNotMarking();
}
return is_compacting_ && is_black;
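
Rewritten this way, BaseRecordWrite is a direct statement of the tri-colour invariant: a black object must never point at a white value, so the barrier greys the value and puts it back on the marking deque. The core restated in isolation (simplified; the compaction return value is omitted):

// Simplified restatement of the barrier above.
if (ObjectMarking::IsBlack(obj) && ObjectMarking::IsWhite(value_heap_obj)) {
  WhiteToGreyAndPush(value_heap_obj);  // value joins the deque as grey
  RestartIfNotMarking();
}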
@@ -118,9 +116,8 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
}
}
-
-void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
- Marking::WhiteToGrey(mark_bit);
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
+ ObjectMarking::WhiteToGrey(obj);
heap_->mark_compact_collector()->marking_deque()->Push(obj);
}
@@ -128,16 +125,13 @@ void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
if (obj->IsHeapObject()) {
HeapObject* heap_obj = HeapObject::cast(obj);
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(obj));
- if (Marking::IsBlack(mark_bit)) {
- MemoryChunk::IncrementLiveBytes(heap_obj, -heap_obj->Size());
- }
- Marking::AnyToGrey(mark_bit);
+ ObjectMarking::AnyToGrey(heap_obj);
}
}
void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
HeapObject* to) {
+ DCHECK(MemoryChunk::FromAddress(from->address())->SweepingDone());
// This is only used when resizing an object.
DCHECK(MemoryChunk::FromAddress(from->address()) ==
MemoryChunk::FromAddress(to->address()));
@@ -158,11 +152,12 @@ void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
if (Marking::IsBlack(old_mark_bit)) {
Marking::BlackToWhite(old_mark_bit);
- Marking::MarkBlack(new_mark_bit);
+ Marking::WhiteToBlack(new_mark_bit);
return;
} else if (Marking::IsGrey(old_mark_bit)) {
Marking::GreyToWhite(old_mark_bit);
- heap->incremental_marking()->WhiteToGreyAndPush(to, new_mark_bit);
+ Marking::WhiteToGrey(new_mark_bit);
+ heap->mark_compact_collector()->marking_deque()->Push(to);
heap->incremental_marking()->RestartIfNotMarking();
}
@@ -210,10 +205,10 @@ class IncrementalMarkingMarkingVisitor
} while (scan_until_end && start_offset < object_size);
chunk->set_progress_bar(start_offset);
if (start_offset < object_size) {
- if (Marking::IsGrey(ObjectMarking::MarkBitFrom(object))) {
+ if (ObjectMarking::IsGrey(object)) {
heap->mark_compact_collector()->marking_deque()->Unshift(object);
} else {
- DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+ DCHECK(ObjectMarking::IsBlack(object));
heap->mark_compact_collector()->UnshiftBlack(object);
}
heap->incremental_marking()->NotifyIncompleteScanOfObject(
@@ -265,10 +260,8 @@ class IncrementalMarkingMarkingVisitor
// Returns true if object needed marking and false otherwise.
INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
HeapObject* heap_object = HeapObject::cast(obj);
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
- if (Marking::IsWhite(mark_bit)) {
- Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytes(heap_object, heap_object->Size());
+ if (ObjectMarking::IsWhite(heap_object)) {
+ ObjectMarking::WhiteToBlack(heap_object);
return true;
}
return false;
@@ -276,7 +269,7 @@ class IncrementalMarkingMarkingVisitor
};
void IncrementalMarking::IterateBlackObject(HeapObject* object) {
- if (IsMarking() && Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
+ if (IsMarking() && ObjectMarking::IsBlack(object)) {
Page* page = Page::FromAddress(object->address());
if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
// IterateBlackObject requires us to visit the whole object.
@@ -631,7 +624,7 @@ void IncrementalMarking::ProcessWeakCells() {
HeapObject* value = HeapObject::cast(weak_cell->value());
// Remove weak cells with live objects from the list, they do not need
// clearing.
- if (MarkCompactCollector::IsMarked(value)) {
+ if (ObjectMarking::IsBlackOrGrey(value)) {
// Record slot, if value is pointing to an evacuation candidate.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
@@ -661,8 +654,7 @@ bool ShouldRetainMap(Map* map, int age) {
}
Object* constructor = map->GetConstructor();
if (!constructor->IsHeapObject() ||
- Marking::IsWhite(
- ObjectMarking::MarkBitFrom(HeapObject::cast(constructor)))) {
+ ObjectMarking::IsWhite(HeapObject::cast(constructor))) {
// The constructor is dead, no new objects with this map can
// be created. Do not retain this map.
return false;
@@ -691,16 +683,14 @@ void IncrementalMarking::RetainMaps() {
int age = Smi::cast(retained_maps->Get(i + 1))->value();
int new_age;
Map* map = Map::cast(cell->value());
- MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
- Marking::IsWhite(map_mark)) {
+ ObjectMarking::IsWhite(map)) {
if (ShouldRetainMap(map, age)) {
MarkGrey(heap(), map);
}
Object* prototype = map->prototype();
if (age > 0 && prototype->IsHeapObject() &&
- Marking::IsWhite(
- ObjectMarking::MarkBitFrom(HeapObject::cast(prototype)))) {
+ ObjectMarking::IsWhite(HeapObject::cast(prototype))) {
// The prototype is not marked, age the map.
new_age = age - 1;
} else {
@@ -736,8 +726,7 @@ void IncrementalMarking::FinalizeIncrementally() {
// 4) Remove weak cell with live values from the list of weak cells, they
// do not need processing during GC.
MarkRoots();
- if (!heap_->local_embedder_heap_tracer()->InUse() &&
- FLAG_object_grouping_in_incremental_finalization) {
+ if (!heap_->local_embedder_heap_tracer()->InUse()) {
MarkObjectGroups();
}
if (incremental_marking_finalization_rounds_ == 0) {
@@ -808,15 +797,12 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
// them.
if (map_word.IsForwardingAddress()) {
HeapObject* dest = map_word.ToForwardingAddress();
- if (Marking::IsBlack(ObjectMarking::MarkBitFrom(dest))) continue;
+ if (ObjectMarking::IsBlack(dest)) continue;
array[new_top] = dest;
new_top = ((new_top + 1) & mask);
DCHECK(new_top != marking_deque->bottom());
-#ifdef DEBUG
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
- DCHECK(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)));
-#endif
+ DCHECK(ObjectMarking::IsGrey(obj) ||
+ (obj->IsFiller() && ObjectMarking::IsWhite(obj)));
}
} else if (obj->map() != filler_map) {
// Skip one word filler objects that appear on the
@@ -824,14 +810,11 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
array[new_top] = obj;
new_top = ((new_top + 1) & mask);
DCHECK(new_top != marking_deque->bottom());
-#ifdef DEBUG
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- DCHECK(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
- (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
- Marking::IsBlack(mark_bit)));
-#endif
+ DCHECK(ObjectMarking::IsGrey(obj) ||
+ (obj->IsFiller() && ObjectMarking::IsWhite(obj)) ||
+ (MemoryChunk::FromAddress(obj->address())
+ ->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+ ObjectMarking::IsBlack(obj)));
}
}
marking_deque->set_top(new_top);
@@ -855,17 +838,14 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
}
void IncrementalMarking::MarkGrey(Heap* heap, HeapObject* object) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
- if (Marking::IsWhite(mark_bit)) {
- heap->incremental_marking()->WhiteToGreyAndPush(object, mark_bit);
+ if (ObjectMarking::IsWhite(object)) {
+ heap->incremental_marking()->WhiteToGreyAndPush(object);
}
}
void IncrementalMarking::MarkBlack(HeapObject* obj, int size) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
- if (Marking::IsBlack(mark_bit)) return;
- Marking::GreyToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytes(obj, size);
+ if (ObjectMarking::IsBlack(obj)) return;
+ ObjectMarking::GreyToBlack(obj);
}
intptr_t IncrementalMarking::ProcessMarkingDeque(
@@ -880,8 +860,7 @@ intptr_t IncrementalMarking::ProcessMarkingDeque(
// Left trimming may result in white filler objects on the marking deque.
// Ignore these objects.
if (obj->IsFiller()) {
- DCHECK(Marking::IsImpossible(ObjectMarking::MarkBitFrom(obj)) ||
- Marking::IsWhite(ObjectMarking::MarkBitFrom(obj)));
+ DCHECK(ObjectMarking::IsImpossible(obj) || ObjectMarking::IsWhite(obj));
continue;
}
@@ -936,10 +915,8 @@ void IncrementalMarking::Hurry() {
HeapObject* cache = HeapObject::cast(
Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
if (!cache->IsUndefined(heap_->isolate())) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(cache);
- if (Marking::IsGrey(mark_bit)) {
- Marking::GreyToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytes(cache, cache->Size());
+ if (ObjectMarking::IsGrey(cache)) {
+ ObjectMarking::GreyToBlack(cache);
}
}
context = Context::cast(context)->next_context_link();
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 5464f129a7..37f1e5cec1 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -157,7 +157,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
void RecordCodeTargetPatch(Address pc, HeapObject* value);
- void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+ void WhiteToGreyAndPush(HeapObject* obj);
inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 1973753b0c..3104ea2311 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -13,58 +13,34 @@ namespace v8 {
namespace internal {
void MarkCompactCollector::PushBlack(HeapObject* obj) {
- DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
- if (marking_deque()->Push(obj)) {
- MemoryChunk::IncrementLiveBytes(obj, obj->Size());
- } else {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
- Marking::BlackToGrey(mark_bit);
+ DCHECK(ObjectMarking::IsBlack(obj));
+ if (!marking_deque()->Push(obj)) {
+ ObjectMarking::BlackToGrey(obj);
}
}
void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
- DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
+ DCHECK(ObjectMarking::IsBlack(obj));
if (!marking_deque()->Unshift(obj)) {
- MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
- Marking::BlackToGrey(mark_bit);
+ ObjectMarking::BlackToGrey(obj);
}
}
-
-void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
- DCHECK(ObjectMarking::MarkBitFrom(obj) == mark_bit);
- if (Marking::IsWhite(mark_bit)) {
- Marking::WhiteToBlack(mark_bit);
- DCHECK(obj->GetIsolate()->heap()->Contains(obj));
+void MarkCompactCollector::MarkObject(HeapObject* obj) {
+ if (ObjectMarking::IsWhite(obj)) {
+ ObjectMarking::WhiteToBlack(obj);
PushBlack(obj);
}
}
-
-void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
- DCHECK(Marking::IsWhite(mark_bit));
- DCHECK(ObjectMarking::MarkBitFrom(obj) == mark_bit);
- Marking::WhiteToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytes(obj, obj->Size());
-}
-
-
-bool MarkCompactCollector::IsMarked(Object* obj) {
- DCHECK(obj->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(obj);
- return Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(heap_object));
-}
-
-
void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(object)) {
- DCHECK(Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object)));
+ DCHECK(ObjectMarking::IsBlackOrGrey(object));
RememberedSet<OLD_TO_OLD>::Insert(source_page,
reinterpret_cast<Address>(slot));
}
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index c931f520b7..cf6bdff35d 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -28,6 +28,7 @@
#include "src/tracing/tracing-category-observer.h"
#include "src/utils-inl.h"
#include "src/v8.h"
+#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -66,13 +67,11 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
#ifdef VERIFY_HEAP
class VerifyMarkingVisitor : public ObjectVisitor {
public:
- explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
-
void VisitPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- CHECK(heap_->mark_compact_collector()->IsMarked(object));
+ CHECK(ObjectMarking::IsBlackOrGrey(object));
}
}
}
@@ -92,22 +91,19 @@ class VerifyMarkingVisitor : public ObjectVisitor {
ObjectVisitor::VisitCell(rinfo);
}
}
-
- private:
- Heap* heap_;
};
static void VerifyMarking(Heap* heap, Address bottom, Address top) {
- VerifyMarkingVisitor visitor(heap);
+ VerifyMarkingVisitor visitor;
HeapObject* object;
Address next_object_must_be_here_or_later = bottom;
for (Address current = bottom; current < top;) {
object = HeapObject::FromAddress(current);
// One word fillers at the end of a black area can be grey.
- if (MarkCompactCollector::IsMarked(object) &&
+ if (ObjectMarking::IsBlackOrGrey(object) &&
object->map() != heap->one_pointer_filler_map()) {
- CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+ CHECK(ObjectMarking::IsBlack(object));
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
@@ -157,11 +153,11 @@ static void VerifyMarking(Heap* heap) {
VerifyMarking(heap->map_space());
VerifyMarking(heap->new_space());
- VerifyMarkingVisitor visitor(heap);
+ VerifyMarkingVisitor visitor;
LargeObjectIterator it(heap->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- if (MarkCompactCollector::IsMarked(obj)) {
+ if (ObjectMarking::IsBlackOrGrey(obj)) {
obj->Iterate(&visitor);
}
}
@@ -348,8 +344,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
- CHECK(Marking::IsWhite(mark_bit));
+ CHECK(ObjectMarking::IsWhite(obj));
CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
}
}
@@ -398,7 +393,7 @@ void MarkCompactCollector::ClearMarkbits() {
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- Marking::MarkWhite(ObjectMarking::MarkBitFrom(obj));
+ ObjectMarking::ClearMarkBit(obj);
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
chunk->ResetProgressBar();
chunk->ResetLiveBytes();
@@ -909,8 +904,7 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
SharedFunctionInfo* shared = candidate->shared();
Code* code = shared->code();
- MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
- if (Marking::IsWhite(code_mark)) {
+ if (ObjectMarking::IsWhite(code)) {
if (FLAG_trace_code_flushing && shared->is_compiled()) {
PrintF("[code-flushing clears: ");
shared->ShortPrint();
@@ -928,7 +922,7 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
candidate->set_code(lazy_compile);
}
} else {
- DCHECK(Marking::IsBlack(code_mark));
+ DCHECK(ObjectMarking::IsBlack(code));
candidate->set_code(code);
}
@@ -962,8 +956,7 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
ClearNextCandidate(candidate);
Code* code = candidate->code();
- MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
- if (Marking::IsWhite(code_mark)) {
+ if (ObjectMarking::IsWhite(code)) {
if (FLAG_trace_code_flushing && candidate->is_compiled()) {
PrintF("[code-flushing clears: ");
candidate->ShortPrint();
@@ -1088,24 +1081,17 @@ class StaticYoungGenerationMarkingVisitor
Object* target = *p;
if (heap->InNewSpace(target)) {
if (MarkRecursively(heap, HeapObject::cast(target))) return;
- PushOnMarkingDeque(heap, target);
+ heap->mark_compact_collector()->MarkObject(HeapObject::cast(target));
}
}
protected:
- inline static void PushOnMarkingDeque(Heap* heap, Object* obj) {
- HeapObject* object = HeapObject::cast(obj);
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
- heap->mark_compact_collector()->MarkObject(object, mark_bit);
- }
-
inline static bool MarkRecursively(Heap* heap, HeapObject* object) {
StackLimitCheck check(heap->isolate());
if (check.HasOverflowed()) return false;
- MarkBit mark = ObjectMarking::MarkBitFrom(object);
- if (Marking::IsBlackOrGrey(mark)) return true;
- heap->mark_compact_collector()->SetMark(object, mark);
+ if (ObjectMarking::IsBlackOrGrey(object)) return true;
+ ObjectMarking::WhiteToBlack(object);
IterateBody(object->map(), object);
return true;
}
@@ -1136,16 +1122,14 @@ class MarkCompactMarkingVisitor
// Marks the object black and pushes it on the marking stack.
INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
- MarkBit mark = ObjectMarking::MarkBitFrom(object);
- heap->mark_compact_collector()->MarkObject(object, mark);
+ heap->mark_compact_collector()->MarkObject(object);
}
// Marks the object black without pushing it on the marking stack.
// Returns true if object needed marking and false otherwise.
INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
- if (Marking::IsWhite(mark_bit)) {
- heap->mark_compact_collector()->SetMark(object, mark_bit);
+ if (ObjectMarking::IsWhite(object)) {
+ ObjectMarking::WhiteToBlack(object);
return true;
}
return false;
@@ -1157,8 +1141,7 @@ class MarkCompactMarkingVisitor
if (!(*p)->IsHeapObject()) return;
HeapObject* target_object = HeapObject::cast(*p);
collector->RecordSlot(object, p, target_object);
- MarkBit mark = ObjectMarking::MarkBitFrom(target_object);
- collector->MarkObject(target_object, mark);
+ collector->MarkObject(target_object);
}
@@ -1167,15 +1150,13 @@ class MarkCompactMarkingVisitor
HeapObject* obj)) {
#ifdef DEBUG
DCHECK(collector->heap()->Contains(obj));
- DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
+ DCHECK(ObjectMarking::IsWhite(obj));
#endif
Map* map = obj->map();
Heap* heap = obj->GetHeap();
- MarkBit mark = ObjectMarking::MarkBitFrom(obj);
- heap->mark_compact_collector()->SetMark(obj, mark);
+ ObjectMarking::WhiteToBlack(obj);
// Mark the map pointer and the body.
- MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
- heap->mark_compact_collector()->MarkObject(map, map_mark);
+ heap->mark_compact_collector()->MarkObject(map);
IterateBody(map, obj);
}
@@ -1194,8 +1175,7 @@ class MarkCompactMarkingVisitor
if (!o->IsHeapObject()) continue;
collector->RecordSlot(object, p, o);
HeapObject* obj = HeapObject::cast(o);
- MarkBit mark = ObjectMarking::MarkBitFrom(obj);
- if (Marking::IsBlackOrGrey(mark)) continue;
+ if (ObjectMarking::IsBlackOrGrey(obj)) continue;
VisitUnmarkedObject(collector, obj);
}
return true;
@@ -1228,7 +1208,7 @@ class MarkCompactMarkingVisitor
// was marked through the compilation cache before marker reached JSRegExp
// object.
FixedArray* data = FixedArray::cast(re->data());
- if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(data))) {
+ if (ObjectMarking::IsBlackOrGrey(data)) {
Object** slot =
data->data_start() + JSRegExp::saved_code_index(is_one_byte);
heap->mark_compact_collector()->RecordSlot(data, slot, code);
@@ -1312,10 +1292,8 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- MarkBit shared_mark = ObjectMarking::MarkBitFrom(shared);
- MarkBit code_mark = ObjectMarking::MarkBitFrom(shared->code());
- collector_->MarkObject(shared->code(), code_mark);
- collector_->MarkObject(shared, shared_mark);
+ collector_->MarkObject(shared->code());
+ collector_->MarkObject(shared);
}
}
@@ -1333,12 +1311,10 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
// actual optimized code object.
StackFrame* frame = it.frame();
Code* code = frame->unchecked_code();
- MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
- MarkObject(code, code_mark);
+ MarkObject(code);
if (frame->is_optimized()) {
Code* optimized_code = frame->LookupCode();
- MarkBit optimized_code_mark = ObjectMarking::MarkBitFrom(optimized_code);
- MarkObject(optimized_code, optimized_code_mark);
+ MarkObject(optimized_code);
}
}
}
@@ -1394,18 +1370,16 @@ class RootMarkingVisitor : public ObjectVisitor {
!collector_->heap()->InNewSpace(object))
return;
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
- if (Marking::IsBlackOrGrey(mark_bit)) return;
+ if (ObjectMarking::IsBlackOrGrey(object)) return;
Map* map = object->map();
// Mark the object.
- collector_->SetMark(object, mark_bit);
+ ObjectMarking::WhiteToBlack(object);
switch (mode) {
case MarkCompactMode::FULL: {
// Mark the map pointer and body, and push them on the marking stack.
- MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
- collector_->MarkObject(map, map_mark);
+ collector_->MarkObject(map);
MarkCompactMarkingVisitor::IterateBody(map, object);
} break;
case MarkCompactMode::YOUNG_GENERATION:
@@ -1437,10 +1411,14 @@ class StringTableCleaner : public ObjectVisitor {
for (Object** p = start; p < end; p++) {
Object* o = *p;
if (o->IsHeapObject()) {
- if (Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(o)))) {
+ if (ObjectMarking::IsWhite(HeapObject::cast(o))) {
if (finalize_external_strings) {
- DCHECK(o->IsExternalString());
- heap_->FinalizeExternalString(String::cast(*p));
+ if (o->IsExternalString()) {
+ heap_->FinalizeExternalString(String::cast(*p));
+ } else {
+ // The original external string may have been internalized.
+ DCHECK(o->IsThinString());
+ }
} else {
pointers_removed_++;
}
@@ -1474,9 +1452,8 @@ typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(object));
- DCHECK(!Marking::IsGrey(mark_bit));
- if (Marking::IsBlack(mark_bit)) {
+ DCHECK(!ObjectMarking::IsGrey(HeapObject::cast(object)));
+ if (ObjectMarking::IsBlack(HeapObject::cast(object))) {
return object;
} else if (object->IsAllocationSite() &&
!(AllocationSite::cast(object)->IsZombie())) {
@@ -1484,7 +1461,7 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
// space. These sites get a one-time reprieve.
AllocationSite* site = AllocationSite::cast(object);
site->MarkZombie();
- site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
+ ObjectMarking::WhiteToBlack(site);
return object;
} else {
return NULL;
@@ -1504,9 +1481,8 @@ void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
Map* filler_map = heap()->one_pointer_filler_map();
for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
- MarkBit markbit = ObjectMarking::MarkBitFrom(object);
- if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
- Marking::GreyToBlack(markbit);
+ if ((object->map() != filler_map) && ObjectMarking::IsGrey(object)) {
+ ObjectMarking::GreyToBlack(object);
PushBlack(object);
if (marking_deque()->IsFull()) return;
}
@@ -1518,9 +1494,8 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
LiveObjectIterator<kGreyObjects> it(p);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
- MarkBit markbit = ObjectMarking::MarkBitFrom(object);
- DCHECK(Marking::IsGrey(markbit));
- Marking::GreyToBlack(markbit);
+ DCHECK(ObjectMarking::IsGrey(object));
+ ObjectMarking::GreyToBlack(object);
PushBlack(object);
if (marking_deque()->IsFull()) return;
}
@@ -1970,9 +1945,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
Object* o = *p;
if (!o->IsHeapObject()) return false;
- HeapObject* heap_object = HeapObject::cast(o);
- MarkBit mark = ObjectMarking::MarkBitFrom(heap_object);
- return Marking::IsWhite(mark);
+ return ObjectMarking::IsWhite(HeapObject::cast(o));
}
@@ -1980,31 +1953,22 @@ bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
Object** p) {
Object* o = *p;
DCHECK(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
- MarkBit mark = ObjectMarking::MarkBitFrom(heap_object);
- return Marking::IsWhite(mark);
+ return ObjectMarking::IsWhite(HeapObject::cast(o));
}
void MarkCompactCollector::MarkStringTable(
RootMarkingVisitor<MarkCompactMode::FULL>* visitor) {
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
- MarkBit string_table_mark = ObjectMarking::MarkBitFrom(string_table);
- if (Marking::IsWhite(string_table_mark)) {
+ if (ObjectMarking::IsWhite(string_table)) {
// String table could have already been marked by visiting the handles list.
- SetMark(string_table, string_table_mark);
+ ObjectMarking::WhiteToBlack(string_table);
}
// Explicitly mark the prefix.
string_table->IteratePrefix(visitor);
ProcessMarkingDeque<MarkCompactMode::FULL>();
}
-
-void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(site);
- SetMark(site, mark_bit);
-}
-
void MarkCompactCollector::MarkRoots(
RootMarkingVisitor<MarkCompactMode::FULL>* visitor) {
// Mark the heap roots including global variables, stack variables,
@@ -2032,7 +1996,7 @@ void MarkCompactCollector::MarkImplicitRefGroups(
ImplicitRefGroup* entry = ref_groups->at(i);
DCHECK(entry != NULL);
- if (!IsMarked(*entry->parent)) {
+ if (ObjectMarking::IsWhite(*entry->parent)) {
(*ref_groups)[last++] = entry;
continue;
}
@@ -2065,17 +2029,16 @@ void MarkCompactCollector::EmptyMarkingDeque() {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
- DCHECK(!Marking::IsWhite(ObjectMarking::MarkBitFrom(object)));
+ DCHECK(!ObjectMarking::IsWhite(object));
Map* map = object->map();
switch (mode) {
case MarkCompactMode::FULL: {
- MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
- MarkObject(map, map_mark);
+ MarkObject(map);
MarkCompactMarkingVisitor::IterateBody(map, object);
} break;
case MarkCompactMode::YOUNG_GENERATION: {
- DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+ DCHECK(ObjectMarking::IsBlack(object));
StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
} break;
}
@@ -2276,10 +2239,10 @@ class MarkCompactCollector::ObjectStatsVisitor
}
bool Visit(HeapObject* obj) override {
- if (Marking::IsBlack(ObjectMarking::MarkBitFrom(obj))) {
+ if (ObjectMarking::IsBlack(obj)) {
live_collector_.CollectStatistics(obj);
} else {
- DCHECK(!Marking::IsGrey(ObjectMarking::MarkBitFrom(obj)));
+ DCHECK(!ObjectMarking::IsGrey(obj));
dead_collector_.CollectStatistics(obj);
}
return true;
@@ -2335,11 +2298,10 @@ SlotCallbackResult MarkCompactCollector::CheckAndMarkObject(
// has to be in ToSpace.
DCHECK(heap->InToSpace(object));
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
- if (Marking::IsBlackOrGrey(mark_bit)) {
+ if (ObjectMarking::IsBlackOrGrey(heap_object)) {
return KEEP_SLOT;
}
- heap->mark_compact_collector()->SetMark(heap_object, mark_bit);
+ ObjectMarking::WhiteToBlack(heap_object);
StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(),
heap_object);
return KEEP_SLOT;
@@ -2349,8 +2311,7 @@ SlotCallbackResult MarkCompactCollector::CheckAndMarkObject(
static bool IsUnmarkedObject(Heap* heap, Object** p) {
DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
- return heap->InNewSpace(*p) &&
- !Marking::IsBlack(ObjectMarking::MarkBitFrom(HeapObject::cast(*p)));
+ return heap->InNewSpace(*p) && !ObjectMarking::IsBlack(HeapObject::cast(*p));
}
void MarkCompactCollector::MarkLiveObjectsInYoungGeneration() {
@@ -2375,11 +2336,12 @@ void MarkCompactCollector::MarkLiveObjectsInYoungGeneration() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS);
- RememberedSet<OLD_TO_NEW>::Iterate(heap(), [this](Address addr) {
- return CheckAndMarkObject(heap(), addr);
- });
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ heap(), NON_SYNCHRONIZED,
+ [this](Address addr) { return CheckAndMarkObject(heap(), addr); });
RememberedSet<OLD_TO_NEW>::IterateTyped(
- heap(), [this](SlotType type, Address host_addr, Address addr) {
+ heap(), NON_SYNCHRONIZED,
+ [this](SlotType type, Address host_addr, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
isolate(), type, addr, [this](Object** addr) {
return CheckAndMarkObject(heap(),
@@ -2632,11 +2594,11 @@ void MarkCompactCollector::ClearSimpleMapTransitions(
while (weak_cell_obj != Smi::kZero) {
WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
Map* map = Map::cast(weak_cell->value());
- DCHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(map)));
+ DCHECK(ObjectMarking::IsWhite(map));
Object* potential_parent = map->constructor_or_backpointer();
if (potential_parent->IsMap()) {
Map* parent = Map::cast(potential_parent);
- if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(parent)) &&
+ if (ObjectMarking::IsBlackOrGrey(parent) &&
parent->raw_transitions() == weak_cell) {
ClearSimpleMapTransition(parent, map);
}
@@ -2675,8 +2637,7 @@ void MarkCompactCollector::ClearFullMapTransitions() {
if (num_transitions > 0) {
Map* map = array->GetTarget(0);
Map* parent = Map::cast(map->constructor_or_backpointer());
- bool parent_is_alive =
- Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(parent));
+ bool parent_is_alive = ObjectMarking::IsBlackOrGrey(parent);
DescriptorArray* descriptors =
parent_is_alive ? parent->instance_descriptors() : nullptr;
bool descriptors_owner_died =
@@ -2701,7 +2662,7 @@ bool MarkCompactCollector::CompactTransitionArray(
for (int i = 0; i < num_transitions; ++i) {
Map* target = transitions->GetTarget(i);
DCHECK_EQ(target->constructor_or_backpointer(), map);
- if (Marking::IsWhite(ObjectMarking::MarkBitFrom(target))) {
+ if (ObjectMarking::IsWhite(target)) {
if (descriptors != nullptr &&
target->instance_descriptors() == descriptors) {
descriptors_owner_died = true;
@@ -2749,7 +2710,7 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
int to_trim = number_of_descriptors - number_of_own_descriptors;
if (to_trim > 0) {
heap_->RightTrimFixedArray(descriptors,
- to_trim * DescriptorArray::kDescriptorSize);
+ to_trim * DescriptorArray::kEntrySize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
@@ -2793,11 +2754,11 @@ void MarkCompactCollector::ProcessWeakCollections() {
while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
- DCHECK(MarkCompactCollector::IsMarked(weak_collection));
+ DCHECK(ObjectMarking::IsBlackOrGrey(weak_collection));
if (weak_collection->table()->IsHashTable()) {
ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
for (int i = 0; i < table->Capacity(); i++) {
- if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+ if (ObjectMarking::IsBlackOrGrey(HeapObject::cast(table->KeyAt(i)))) {
Object** key_slot =
table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
RecordSlot(table, key_slot, *key_slot);
@@ -2819,12 +2780,12 @@ void MarkCompactCollector::ClearWeakCollections() {
while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
- DCHECK(MarkCompactCollector::IsMarked(weak_collection));
+ DCHECK(ObjectMarking::IsBlackOrGrey(weak_collection));
if (weak_collection->table()->IsHashTable()) {
ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
for (int i = 0; i < table->Capacity(); i++) {
HeapObject* key = HeapObject::cast(table->KeyAt(i));
- if (!MarkCompactCollector::IsMarked(key)) {
+ if (!ObjectMarking::IsBlackOrGrey(key)) {
table->RemoveEntry(i);
}
}
@@ -2865,7 +2826,7 @@ void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
// We do not insert cleared weak cells into the list, so the value
// cannot be a Smi here.
HeapObject* value = HeapObject::cast(weak_cell->value());
- if (!MarkCompactCollector::IsMarked(value)) {
+ if (!ObjectMarking::IsBlackOrGrey(value)) {
// Cells for new-space objects embedded in optimized code are wrapped in
// WeakCell and put into Heap::weak_object_to_code_table.
// Such cells do not have any strong references but we want to keep them
@@ -2874,10 +2835,9 @@ void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
if (value->IsCell()) {
Object* cell_value = Cell::cast(value)->value();
if (cell_value->IsHeapObject() &&
- MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
+ ObjectMarking::IsBlackOrGrey(HeapObject::cast(cell_value))) {
// Resurrect the cell.
- MarkBit mark = ObjectMarking::MarkBitFrom(value);
- SetMark(value, mark);
+ ObjectMarking::WhiteToBlack(value);
Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
RecordSlot(value, slot, *slot);
slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
@@ -3036,14 +2996,26 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
return String::cast(*p);
}
-void MarkCompactCollector::EvacuateNewSpacePrologue() {
+void MarkCompactCollector::EvacuatePrologue() {
+ // New space.
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
- newspace_evacuation_candidates_.Add(p);
+ new_space_evacuation_pages_.Add(p);
}
new_space->Flip();
new_space->ResetAllocationInfo();
+
+ // Old space.
+ CHECK(old_space_evacuation_pages_.is_empty());
+ old_space_evacuation_pages_.Swap(&evacuation_candidates_);
+}
+
+void MarkCompactCollector::EvacuateEpilogue() {
+ // New space.
+ heap()->new_space()->set_age_mark(heap()->new_space()->top());
+ // Old space. Deallocate evacuated candidate pages.
+ ReleaseEvacuationCandidates();
}
class MarkCompactCollector::Evacuator : public Malloced {
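
EvacuatePrologue and EvacuateEpilogue now bracket the whole evacuation phase: the prologue flips new space and adopts the candidate list into old_space_evacuation_pages_, and the epilogue sets the age mark and releases the evacuated pages. Condensed from the updated EvacuateNewSpaceAndCandidates further down (tracing scopes omitted):

// Call order, condensed; see EvacuateNewSpaceAndCandidates below.
EvacuatePrologue();               // flip new space, adopt candidates
EvacuatePagesInParallel();        // copy or move live objects
UpdatePointersAfterEvacuation();  // fix up recorded slots
EvacuateEpilogue();               // set age mark, release old pages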
@@ -3297,18 +3269,19 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
int abandoned_pages = 0;
intptr_t live_bytes = 0;
- for (Page* page : evacuation_candidates_) {
+ for (Page* page : old_space_evacuation_pages_) {
live_bytes += page->LiveBytes();
job.AddPage(page, &abandoned_pages);
}
const bool reduce_memory = heap()->ShouldReduceMemory();
const Address age_mark = heap()->new_space()->age_mark();
- for (Page* page : newspace_evacuation_candidates_) {
+ for (Page* page : new_space_evacuation_pages_) {
live_bytes += page->LiveBytes();
if (!reduce_memory && !page->NeverEvacuate() &&
(page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
- !page->Contains(age_mark)) {
+ !page->Contains(age_mark) &&
+ heap()->CanExpandOldGeneration(page->LiveBytes())) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
} else {
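
The loop above adds one more gate to wholesale page promotion: the old generation must be able to absorb the page's live bytes, otherwise the page is evacuated object by object as before. The conditions restated as a single predicate (a sketch, not a function in the patch; names as in the loop):

// A whole new-space page is moved rather than copied only if:
bool should_move_page =
    !reduce_memory && !page->NeverEvacuate() &&
    page->LiveBytes() > Evacuator::PageEvacuationThreshold() &&
    !page->Contains(age_mark) &&
    heap()->CanExpandOldGeneration(page->LiveBytes());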
@@ -3419,7 +3392,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
- DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+ DCHECK(ObjectMarking::IsBlack(object));
Address free_end = object->address();
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
@@ -3509,8 +3482,7 @@ void MarkCompactCollector::InvalidateCode(Code* code) {
DCHECK(compacting_);
// If the object is white then no slots were recorded on it yet.
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(code);
- if (Marking::IsWhite(mark_bit)) return;
+ if (ObjectMarking::IsWhite(code)) return;
// Ignore all slots that might have been recorded in the body of the
// deoptimized code object. Assumption: no slots will be recorded for
@@ -3531,11 +3503,16 @@ static void VerifyAllBlackObjects(MemoryChunk* page) {
LiveObjectIterator<kAllLiveObjects> it(page);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
- CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+ CHECK(ObjectMarking::IsBlack(object));
}
}
#endif // VERIFY_HEAP
+void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
+ EvacuateRecordOnlyVisitor visitor(heap());
+ VisitLiveObjects(page, &visitor, kKeepMarking);
+}
+
template <class Visitor>
bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
IterationMode mode) {
@@ -3546,7 +3523,7 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
LiveObjectIterator<kBlackObjects> it(page);
HeapObject* object = nullptr;
while ((object = it.Next()) != nullptr) {
- DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+ DCHECK(ObjectMarking::IsBlack(object));
if (!visitor->Visit(object)) {
if (mode == kClearMarkbits) {
page->markbits()->ClearRange(
@@ -3594,18 +3571,23 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Heap::RelocationLock relocation_lock(heap());
{
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
+ EvacuatePrologue();
+ }
+
+ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
EvacuationScope evacuation_scope(this);
-
- EvacuateNewSpacePrologue();
EvacuatePagesInParallel();
- heap()->new_space()->set_age_mark(heap()->new_space()->top());
}
UpdatePointersAfterEvacuation();
- if (!heap()->new_space()->Rebalance()) {
- FatalProcessOutOfMemory("NewSpace::Rebalance");
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
+ if (!heap()->new_space()->Rebalance()) {
+ FatalProcessOutOfMemory("NewSpace::Rebalance");
+ }
}
// Give pages that are queued to be freed back to the OS. Note that filtering
@@ -3617,7 +3599,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
- for (Page* p : newspace_evacuation_candidates_) {
+ for (Page* p : new_space_evacuation_pages_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
sweeper().AddPage(p->owner()->identity(), p);
@@ -3628,9 +3610,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
sweeper().AddPage(p->owner()->identity(), p);
}
}
- newspace_evacuation_candidates_.Rewind(0);
+ new_space_evacuation_pages_.Rewind(0);
- for (Page* p : evacuation_candidates_) {
+ for (Page* p : old_space_evacuation_pages_) {
// Important: skip list should be cleared only after roots were updated
// because root iteration traverses the stack and might have to find
// code objects from non-updated pc pointing into evacuation candidate.
@@ -3641,9 +3623,11 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
+ }
- // Deallocate evacuated candidate pages.
- ReleaseEvacuationCandidates();
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
+ EvacuateEpilogue();
}
#ifdef VERIFY_HEAP
@@ -3672,7 +3656,7 @@ class PointerUpdateJobTraits {
private:
static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
if (direction == OLD_TO_NEW) {
- RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap, chunk](Address slot) {
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
return CheckAndUpdateOldToNewSlot(heap, slot);
});
} else {
@@ -3743,8 +3727,7 @@ class PointerUpdateJobTraits {
// slot has been recorded multiple times in the remembered set. Since
// there is no forwarding information present we need to check the
// markbits to determine liveness.
- if (Marking::IsBlack(ObjectMarking::MarkBitFrom(
- reinterpret_cast<HeapObject*>(slot_reference))))
+ if (ObjectMarking::IsBlack(reinterpret_cast<HeapObject*>(slot_reference)))
return KEEP_SLOT;
} else {
DCHECK(!heap->InNewSpace(slot_reference));
@@ -3871,14 +3854,14 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
void MarkCompactCollector::ReleaseEvacuationCandidates() {
- for (Page* p : evacuation_candidates_) {
+ for (Page* p : old_space_evacuation_pages_) {
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
p->ResetLiveBytes();
CHECK(p->SweepingDone());
space->ReleasePage(p);
}
- evacuation_candidates_.Rewind(0);
+ old_space_evacuation_pages_.Rewind(0);
compacting_ = false;
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
@@ -3905,7 +3888,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
AllocationSpace identity) {
int max_freed = 0;
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::LockGuard<base::RecursiveMutex> guard(page->mutex());
// If this page was already swept in the meantime, we can return here.
if (page->SweepingDone()) return 0;
DCHECK_EQ(Page::kSweepingPending,
@@ -4078,8 +4061,7 @@ void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
Code* host =
isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
pc);
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(host);
- if (Marking::IsBlack(mark_bit)) {
+ if (ObjectMarking::IsBlack(host)) {
RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
// The target is always in old space, we don't have to record the slot in
// the old-to-new remembered set.
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 9952b7953d..86d0b9616b 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -46,6 +46,76 @@ class ObjectMarking : public AllStatic {
return Marking::Color(ObjectMarking::MarkBitFrom(obj));
}
+ V8_INLINE static bool IsImpossible(HeapObject* obj) {
+ return Marking::IsImpossible(MarkBitFrom(obj));
+ }
+
+ V8_INLINE static bool IsBlack(HeapObject* obj) {
+ return Marking::IsBlack(MarkBitFrom(obj));
+ }
+
+ V8_INLINE static bool IsWhite(HeapObject* obj) {
+ return Marking::IsWhite(MarkBitFrom(obj));
+ }
+
+ V8_INLINE static bool IsGrey(HeapObject* obj) {
+ return Marking::IsGrey(MarkBitFrom(obj));
+ }
+
+ V8_INLINE static bool IsBlackOrGrey(HeapObject* obj) {
+ return Marking::IsBlackOrGrey(MarkBitFrom(obj));
+ }
+
+ V8_INLINE static void ClearMarkBit(HeapObject* obj) {
+ Marking::MarkWhite(MarkBitFrom(obj));
+ }
+
+ V8_INLINE static void BlackToWhite(HeapObject* obj) {
+ DCHECK(IsBlack(obj));
+ MarkBit markbit = MarkBitFrom(obj);
+ Marking::BlackToWhite(markbit);
+ MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
+ }
+
+ V8_INLINE static void GreyToWhite(HeapObject* obj) {
+ DCHECK(IsGrey(obj));
+ Marking::GreyToWhite(MarkBitFrom(obj));
+ }
+
+ V8_INLINE static void BlackToGrey(HeapObject* obj) {
+ DCHECK(IsBlack(obj));
+ MarkBit markbit = MarkBitFrom(obj);
+ Marking::BlackToGrey(markbit);
+ MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
+ }
+
+ V8_INLINE static void WhiteToGrey(HeapObject* obj) {
+ DCHECK(IsWhite(obj));
+ Marking::WhiteToGrey(MarkBitFrom(obj));
+ }
+
+ V8_INLINE static void WhiteToBlack(HeapObject* obj) {
+ DCHECK(IsWhite(obj));
+ MarkBit markbit = MarkBitFrom(obj);
+ Marking::WhiteToBlack(markbit);
+ MemoryChunk::IncrementLiveBytes(obj, obj->Size());
+ }
+
+ V8_INLINE static void GreyToBlack(HeapObject* obj) {
+ DCHECK(IsGrey(obj));
+ MarkBit markbit = MarkBitFrom(obj);
+ Marking::GreyToBlack(markbit);
+ MemoryChunk::IncrementLiveBytes(obj, obj->Size());
+ }
+
+ V8_INLINE static void AnyToGrey(HeapObject* obj) {
+ MarkBit markbit = MarkBitFrom(obj);
+ if (Marking::IsBlack(markbit)) {
+ MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
+ }
+ Marking::AnyToGrey(markbit);
+ }
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
};
@@ -446,7 +516,6 @@ class MarkCompactCollector {
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
- static inline bool IsMarked(Object* obj);
static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
inline Heap* heap() const { return heap_; }
@@ -471,6 +540,7 @@ class MarkCompactCollector {
INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
Object* target));
+ void RecordLiveSlotsOnPage(Page* page);
void UpdateSlots(SlotsBuffer* buffer);
void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
@@ -499,10 +569,6 @@ class MarkCompactCollector {
bool evacuation() const { return evacuation_; }
- // Special case for processing weak references in a full collection. We need
- // to artificially keep AllocationSites alive for a time.
- void MarkAllocationSite(AllocationSite* site);
-
// Mark objects in implicit references groups if their parent object
// is marked.
void MarkImplicitRefGroups(MarkObjectFunction mark_object);
@@ -591,11 +657,7 @@ class MarkCompactCollector {
// Marks the object black and pushes it on the marking stack.
// This is for non-incremental marking only.
- INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
-
- // Marks the object black assuming that it is not yet marked.
- // This is for non-incremental marking only.
- INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
+ INLINE(void MarkObject(HeapObject* obj));
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootMarkingVisitor<MarkCompactMode::FULL>* visitor);
@@ -692,8 +754,8 @@ class MarkCompactCollector {
void StartSweepSpaces();
void StartSweepSpace(PagedSpace* space);
- void EvacuateNewSpacePrologue();
-
+ void EvacuatePrologue();
+ void EvacuateEpilogue();
void EvacuatePagesInParallel();
// The number of parallel compaction tasks, including the main thread.
@@ -757,8 +819,11 @@ class MarkCompactCollector {
CodeFlusher* code_flusher_;
+ // Candidates for pages that should be evacuated.
List<Page*> evacuation_candidates_;
- List<Page*> newspace_evacuation_candidates_;
+ // Pages that are actually processed during evacuation.
+ List<Page*> old_space_evacuation_pages_;
+ List<Page*> new_space_evacuation_pages_;
Sweeper sweeper_;
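The ObjectMarking wrappers added above fold the previous two-step idiom, fetching a MarkBit via MarkBitFrom() and then querying or transitioning it through Marking, into single per-object calls that also keep the chunk's live-byte count in sync on every black/white transition. A minimal standalone sketch of that pattern; ToyObject, ToyObjectMarking and the global counter are hypothetical stand-ins, not V8's classes:

// Illustrative only: models the wrapper-plus-live-bytes pattern.
#include <cassert>
#include <cstdio>

enum class Color { kWhite, kGrey, kBlack };

struct ToyObject {
  Color color = Color::kWhite;
  int size = 0;
};

// Per-"chunk" live-byte counter the transitions must keep in sync.
static int g_live_bytes = 0;

struct ToyObjectMarking {
  static bool IsBlack(const ToyObject* o) { return o->color == Color::kBlack; }
  static bool IsWhite(const ToyObject* o) { return o->color == Color::kWhite; }
  // White -> Black also accounts the object's size as live, mirroring
  // ObjectMarking::WhiteToBlack in the hunk above.
  static void WhiteToBlack(ToyObject* o) {
    assert(IsWhite(o));
    o->color = Color::kBlack;
    g_live_bytes += o->size;
  }
  // Black -> White gives the bytes back, as in ObjectMarking::BlackToWhite.
  static void BlackToWhite(ToyObject* o) {
    assert(IsBlack(o));
    o->color = Color::kWhite;
    g_live_bytes -= o->size;
  }
};

int main() {
  ToyObject o;
  o.size = 64;
  ToyObjectMarking::WhiteToBlack(&o);  // one call instead of MarkBitFrom + Marking
  std::printf("black=%d live=%d\n", ToyObjectMarking::IsBlack(&o), g_live_bytes);
  ToyObjectMarking::BlackToWhite(&o);
  std::printf("black=%d live=%d\n", ToyObjectMarking::IsBlack(&o), g_live_bytes);
}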
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 1b9cdf5ecc..50d6fcc2dd 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -4,6 +4,7 @@
#include "src/heap/object-stats.h"
+#include "src/assembler-inl.h"
#include "src/compilation-cache.h"
#include "src/counters.h"
#include "src/heap/heap-inl.h"
@@ -330,7 +331,6 @@ static bool CanRecordFixedArray(Heap* heap, FixedArrayBase* array) {
array->map() != heap->fixed_double_array_map() &&
array != heap->empty_fixed_array() &&
array != heap->empty_byte_array() &&
- array != heap->empty_literals_array() &&
array != heap->empty_sloppy_arguments_elements() &&
array != heap->empty_slow_element_dictionary() &&
array != heap->empty_descriptor_array() &&
@@ -551,34 +551,15 @@ void ObjectStatsCollector::RecordSharedFunctionInfoDetails(
RecordFixedArrayHelper(sfi, optimized_code_map, OPTIMIZED_CODE_MAP_SUB_TYPE,
0);
// Optimized code map should be small, so skip accounting.
- int len = optimized_code_map->length();
- for (int i = SharedFunctionInfo::kEntriesStart; i < len;
- i += SharedFunctionInfo::kEntryLength) {
- Object* slot =
- optimized_code_map->get(i + SharedFunctionInfo::kLiteralsOffset);
- LiteralsArray* literals = nullptr;
- if (slot->IsWeakCell()) {
- WeakCell* cell = WeakCell::cast(slot);
- if (!cell->cleared()) {
- literals = LiteralsArray::cast(cell->value());
- }
- } else {
- literals = LiteralsArray::cast(slot);
- }
- if (literals != nullptr) {
- RecordFixedArrayHelper(sfi, literals, LITERALS_ARRAY_SUB_TYPE, 0);
- RecordFixedArrayHelper(sfi, literals->feedback_vector(),
- FEEDBACK_VECTOR_SUB_TYPE, 0);
- }
- }
}
}
void ObjectStatsCollector::RecordJSFunctionDetails(JSFunction* function) {
- LiteralsArray* literals = function->literals();
- RecordFixedArrayHelper(function, literals, LITERALS_ARRAY_SUB_TYPE, 0);
- RecordFixedArrayHelper(function, literals->feedback_vector(),
- FEEDBACK_VECTOR_SUB_TYPE, 0);
+ if (function->feedback_vector_cell()->value()->IsFeedbackVector()) {
+ FeedbackVector* feedback_vector = function->feedback_vector();
+ RecordFixedArrayHelper(function, feedback_vector, FEEDBACK_VECTOR_SUB_TYPE,
+ 0);
+ }
}
void ObjectStatsCollector::RecordFixedArrayDetails(FixedArray* array) {
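With literals arrays gone, per-function stats above now hinge on whether the function's feedback-vector cell already holds a real FeedbackVector, since vectors are allocated lazily. A hedged sketch of the guard-before-use pattern with toy types (Cell, FunctionToy and friends are invented for illustration):

#include <cstdio>
#include <variant>

struct FeedbackVectorToy { int slot_count = 0; };
struct Undefined {};
// A cell may hold a placeholder or a real vector, analogous to
// feedback_vector_cell()->value().
using CellValue = std::variant<Undefined, FeedbackVectorToy>;

struct FunctionToy {
  CellValue feedback_cell;
};

void RecordStats(const FunctionToy& f) {
  // Mirrors the IsFeedbackVector() check above: only account for the
  // vector once lazy allocation has actually happened.
  if (const auto* v = std::get_if<FeedbackVectorToy>(&f.feedback_cell)) {
    std::printf("feedback vector with %d slots\n", v->slot_count);
  }
}

int main() {
  FunctionToy lazy;                        // cell still undefined: skipped
  FunctionToy compiled{FeedbackVectorToy{8}};
  RecordStats(lazy);
  RecordStats(compiled);
}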
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index d86406bf5f..493dce7179 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_VISITING_INL_H_
#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting.h"
#include "src/ic/ic-state.h"
#include "src/macro-assembler.h"
@@ -31,6 +32,10 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
kVisitConsString,
&FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit);
+ table_.Register(
+ kVisitThinString,
+ &FixedBodyVisitor<StaticVisitor, ThinString::BodyDescriptor, int>::Visit);
+
table_.Register(kVisitSlicedString,
&FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
int>::Visit);
@@ -117,6 +122,10 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
&FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
void>::Visit);
+ table_.Register(kVisitThinString,
+ &FixedBodyVisitor<StaticVisitor, ThinString::BodyDescriptor,
+ void>::Visit);
+
table_.Register(kVisitSlicedString,
&FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
void>::Visit);
@@ -259,16 +268,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- // Monomorphic ICs are preserved when possible, but need to be flushed
- // when they might be keeping a Context alive, or when the heap is about
- // to be serialized.
- if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
- (heap->isolate()->serializer_enabled() ||
- target->ic_age() != heap->global_ic_age())) {
- ICUtility::Clear(heap->isolate(), rinfo->pc(),
- rinfo->host()->constant_pool());
- target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- }
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
StaticVisitor::MarkObject(heap, target);
@@ -334,7 +333,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCell(Map* map,
// contain smi zero.
if (weak_cell->next_cleared() && !weak_cell->cleared()) {
HeapObject* value = HeapObject::cast(weak_cell->value());
- if (MarkCompactCollector::IsMarked(value)) {
+ if (ObjectMarking::IsBlackOrGrey(value)) {
// Weak cells with live values are directly processed here to reduce
// the processing time of weak cells during the main GC pause.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
@@ -453,9 +452,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
HeapObject* object) {
Heap* heap = map->GetHeap();
JSFunction* function = JSFunction::cast(object);
- if (FLAG_cleanup_code_caches_at_gc) {
- function->ClearTypeFeedbackInfoAtGCTime();
- }
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
if (IsFlushable(heap, function)) {
@@ -526,8 +522,7 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap,
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- MarkBit code_mark = ObjectMarking::MarkBitFrom(function->code());
- if (Marking::IsBlackOrGrey(code_mark)) {
+ if (ObjectMarking::IsBlackOrGrey(function->code())) {
return false;
}
@@ -550,8 +545,7 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
Heap* heap, SharedFunctionInfo* shared_info) {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- MarkBit code_mark = ObjectMarking::MarkBitFrom(shared_info->code());
- if (Marking::IsBlackOrGrey(code_mark)) {
+ if (ObjectMarking::IsBlackOrGrey(shared_info->code())) {
return false;
}
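The ThinString registrations above follow the static-visitor pattern used throughout this file: each instance shape gets a slot in a function-pointer table keyed by visitor id, so adding a shape means adding an id and one Register() call. A minimal sketch of such a dispatch table; the types here are hypothetical, not V8's templates:

#include <array>
#include <cstdio>

enum VisitorId { kVisitConsString, kVisitSlicedString, kVisitThinString, kVisitorCount };

struct ObjectToy { VisitorId id; };
using Callback = void (*)(ObjectToy*);

// Analogous to table_.Register(kVisitThinString, ...): one handler per shape.
struct VisitorTable {
  std::array<Callback, kVisitorCount> slots{};
  void Register(VisitorId id, Callback cb) { slots[id] = cb; }
  void Visit(ObjectToy* o) { slots[o->id](o); }
};

static void VisitThin(ObjectToy*) { std::puts("visit thin string body"); }
static void VisitCons(ObjectToy*) { std::puts("visit cons string body"); }

int main() {
  VisitorTable table;
  table.Register(kVisitConsString, VisitCons);
  table.Register(kVisitThinString, VisitThin);  // the new registration
  ObjectToy thin{kVisitThinString};
  table.Visit(&thin);
}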
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 146aa58675..a0df1f50c0 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -4,6 +4,7 @@
#include "src/heap/objects-visiting.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
@@ -41,6 +42,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case kExternalStringTag:
return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
instance_size, has_unboxed_fields);
+
+ case kThinStringTag:
+ return kVisitThinString;
}
UNREACHABLE();
}
@@ -105,6 +109,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_TYPE:
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
@@ -177,7 +182,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case FOREIGN_TYPE:
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
- case SIMD128_VALUE_TYPE:
return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
instance_size, has_unboxed_fields);
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index e35e47c3aa..f10f370314 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -79,6 +79,7 @@ class StaticVisitorBase : public AllStatic {
V(StructGeneric) \
V(ConsString) \
V(SlicedString) \
+ V(ThinString) \
V(Symbol) \
V(Oddball) \
V(Code) \
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index cf17a46821..bdb5bfc9a0 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -14,6 +14,7 @@ namespace v8 {
namespace internal {
enum PointerDirection { OLD_TO_OLD, OLD_TO_NEW };
+enum RememberedSetIterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };
// TODO(ulan): Investigate performance of de-templatizing this class.
template <PointerDirection direction>
@@ -100,9 +101,13 @@ class RememberedSet : public AllStatic {
// Iterates and filters the remembered set with the given callback.
// The callback should take (Address slot) and return SlotCallbackResult.
template <typename Callback>
- static void Iterate(Heap* heap, Callback callback) {
- IterateMemoryChunks(
- heap, [callback](MemoryChunk* chunk) { Iterate(chunk, callback); });
+ static void Iterate(Heap* heap, RememberedSetIterationMode mode,
+ Callback callback) {
+ IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
+ if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
+ Iterate(chunk, callback);
+ if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
+ });
}
// Iterates over all memory chunks that contain non-empty slot sets.
@@ -180,9 +185,12 @@ class RememberedSet : public AllStatic {
// The callback should take (SlotType slot_type, SlotAddress slot) and return
// SlotCallbackResult.
template <typename Callback>
- static void IterateTyped(Heap* heap, Callback callback) {
- IterateMemoryChunks(heap, [callback](MemoryChunk* chunk) {
+ static void IterateTyped(Heap* heap, RememberedSetIterationMode mode,
+ Callback callback) {
+ IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
+ if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
IterateTyped(chunk, callback);
+ if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
});
}
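The new SYNCHRONIZED mode simply brackets each chunk's iteration with that chunk's mutex, for callers that may race with concurrent sweepers; NON_SYNCHRONIZED keeps the old lock-free behavior. A standalone sketch of the mode-controlled locking (ChunkToy is a toy type; like the diff, it locks and unlocks manually rather than via RAII because the mode is checked at runtime):

#include <cstdio>
#include <mutex>
#include <vector>

enum IterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };

struct ChunkToy {
  std::mutex mutex;
  std::vector<int> slots;
};

template <typename Callback>
void IterateChunks(std::vector<ChunkToy>& chunks, IterationMode mode,
                   Callback callback) {
  for (ChunkToy& chunk : chunks) {
    // Mirrors the diff: take the chunk mutex only when the caller asked.
    if (mode == SYNCHRONIZED) chunk.mutex.lock();
    for (int slot : chunk.slots) callback(slot);
    if (mode == SYNCHRONIZED) chunk.mutex.unlock();
  }
}

int main() {
  std::vector<ChunkToy> chunks(2);
  chunks[0].slots = {1, 2};
  chunks[1].slots = {3};
  IterateChunks(chunks, SYNCHRONIZED,
                [](int slot) { std::printf("slot %d\n", slot); });
}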
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index f2722e81de..c4c3e8bd13 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -5,11 +5,13 @@
#include "src/heap/scavenger.h"
#include "src/contexts.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/isolate.h"
#include "src/log.h"
+#include "src/profiler/heap-profiler.h"
namespace v8 {
namespace internal {
@@ -30,6 +32,7 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
+ table_.Register(kVisitThinString, &EvacuateThinString);
table_.Register(kVisitByteArray, &EvacuateByteArray);
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
@@ -89,6 +92,12 @@ class ScavengingVisitor : public StaticVisitorBase {
return &table_;
}
+ static void EvacuateThinStringNoShortcut(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+ ThinString::kSize);
+ }
+
private:
enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
@@ -193,9 +202,8 @@ class ScavengingVisitor : public StaticVisitorBase {
reinterpret_cast<base::AtomicWord>(target));
if (object_contents == POINTER_OBJECT) {
- heap->promotion_queue()->insert(
- target, object_size,
- Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+ heap->promotion_queue()->insert(target, object_size,
+ ObjectMarking::IsBlack(object));
}
heap->IncrementPromotedObjectsSize(object_size);
return true;
@@ -239,8 +247,7 @@ class ScavengingVisitor : public StaticVisitorBase {
DCHECK(map_word.IsForwardingAddress());
HeapObject* target = map_word.ToForwardingAddress();
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(target);
- if (Marking::IsBlack(mark_bit)) {
+ if (ObjectMarking::IsBlack(target)) {
// This object is black and it might not be rescanned by marker.
// We should explicitly record code entry slot for compaction because
// promotion queue processing (IteratePromotedObjectPointers) will
@@ -339,6 +346,22 @@ class ScavengingVisitor : public StaticVisitorBase {
object_size);
}
+ static inline void EvacuateThinString(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ if (marks_handling == IGNORE_MARKS) {
+ HeapObject* actual = ThinString::cast(object)->actual();
+ *slot = actual;
+ // ThinStrings always refer to internalized strings, which are
+ // always in old space.
+ DCHECK(!map->GetHeap()->InNewSpace(actual));
+ object->set_map_word(MapWord::FromForwardingAddress(actual));
+ return;
+ }
+
+ EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+ ThinString::kSize);
+ }
+
template <ObjectContents object_contents>
class ObjectEvacuationStrategy {
public:
@@ -423,6 +446,10 @@ void Scavenger::SelectScavengingVisitorsTable() {
StaticVisitorBase::kVisitShortcutCandidate,
scavenging_visitors_table_.GetVisitorById(
StaticVisitorBase::kVisitConsString));
+ scavenging_visitors_table_.Register(
+ StaticVisitorBase::kVisitThinString,
+ &ScavengingVisitor<TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::
+ EvacuateThinStringNoShortcut);
}
}
}
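EvacuateThinString above mirrors the cons-string shortcut: when marks can be ignored, a ThinString is dissolved in place by pointing the slot at its actual (always old-space, internalized) target and leaving a forwarding map word behind, so no copy happens at all. A toy sketch of that short-circuit; StringToy and its fields are invented stand-ins:

#include <cassert>
#include <cstdio>
#include <string>

struct StringToy {
  std::string payload;
  StringToy* actual = nullptr;     // non-null means "thin" wrapper
  StringToy* forwarded = nullptr;  // stand-in for the forwarding map word
};

// Mirrors EvacuateThinString with marks_handling == IGNORE_MARKS: no copy,
// just retarget the slot and record a forwarding address.
void EvacuateThin(StringToy** slot) {
  StringToy* object = *slot;
  assert(object->actual != nullptr);
  *slot = object->actual;
  object->forwarded = object->actual;
}

int main() {
  StringToy internalized{"hello"};
  StringToy thin{"", &internalized};
  StringToy* slot = &thin;
  EvacuateThin(&slot);
  std::printf("slot now -> %s\n", slot->payload.c_str());
}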
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 2079a80a0b..62d1b620c0 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -208,8 +208,9 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
}
Page* Page::ConvertNewToOld(Page* old_page) {
- OldSpace* old_space = old_page->heap()->old_space();
+ DCHECK(!old_page->is_anchor());
DCHECK(old_page->InNewSpace());
+ OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
old_page->SetFlags(0, ~0);
old_space->AccountCommitted(old_page->size());
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 8d98520d43..c35864ed2f 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -9,11 +9,15 @@
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
+#include "src/counters.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
#include "src/heap/slot-set.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
+#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
@@ -531,7 +535,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
- chunk->mutex_ = new base::Mutex();
+ chunk->mutex_ = new base::RecursiveMutex();
chunk->available_in_free_list_ = 0;
chunk->wasted_memory_ = 0;
chunk->ResetLiveBytes();
@@ -792,7 +796,7 @@ void Page::ResetFreeListStatistics() {
size_t Page::AvailableInFreeList() {
size_t sum = 0;
- ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
+ ForAllFreeListCategories([&sum](FreeListCategory* category) {
sum += category->available();
});
return sum;
@@ -1477,7 +1481,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// All the interior pointers should be contained in the heap.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
- if (Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
+ if (ObjectMarking::IsBlack(object)) {
black_size += size;
}
@@ -1602,6 +1606,9 @@ bool SemiSpace::EnsureCurrentCapacity() {
// Make sure we don't overtake the actual top pointer.
CHECK_NE(to_remove, current_page_);
to_remove->Unlink();
+ // Clear new space flags to avoid this page being treated as a new
+ // space page that is potentially being swept.
+ to_remove->SetFlags(0, Page::kIsInNewSpaceMask);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
to_remove);
}
@@ -2654,7 +2661,7 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
size_t FreeList::EvictFreeListItems(Page* page) {
size_t sum = 0;
page->ForAllFreeListCategories(
- [this, &sum, page](FreeListCategory* category) {
+ [this, &sum](FreeListCategory* category) {
DCHECK_EQ(this, category->owner());
sum += category->available();
RemoveCategory(category);
@@ -3008,7 +3015,8 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
AllocationStep(object->address(), object_size);
if (heap()->incremental_marking()->black_allocation()) {
- Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
+ // We cannot use ObjectMarking here as the object still lacks a size.
+ Marking::WhiteToBlack(ObjectMarking::MarkBitFrom(object));
MemoryChunk::IncrementLiveBytes(object, object_size);
}
return object;
@@ -3057,9 +3065,8 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
LargePage* current = first_page_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
- DCHECK(Marking::IsBlack(mark_bit));
- Marking::BlackToWhite(mark_bit);
+ DCHECK(ObjectMarking::IsBlack(object));
+ ObjectMarking::ClearMarkBit(object);
Page::FromAddress(object->address())->ResetProgressBar();
Page::FromAddress(object->address())->ResetLiveBytes();
current = current->next_page();
@@ -3104,9 +3111,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* current = first_page_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
- DCHECK(!Marking::IsGrey(mark_bit));
- if (Marking::IsBlack(mark_bit)) {
+ DCHECK(!ObjectMarking::IsGrey(object));
+ if (ObjectMarking::IsBlack(object)) {
Address free_start;
if ((free_start = current->GetAddressToShrink()) != 0) {
// TODO(hpayer): Perform partial free concurrently.
@@ -3174,11 +3180,13 @@ void LargeObjectSpace::Verify() {
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
- // strings), fixed arrays, byte arrays, and constant pool arrays in the
- // large object space.
+ // strings), thin strings (sequential strings that have been
+ // morphed into thin strings), fixed arrays, byte arrays, and
+ // constant pool arrays in the large object space.
CHECK(object->IsAbstractCode() || object->IsSeqString() ||
- object->IsExternalString() || object->IsFixedArray() ||
- object->IsFixedDoubleArray() || object->IsByteArray());
+ object->IsExternalString() || object->IsThinString() ||
+ object->IsFixedArray() || object->IsFixedDoubleArray() ||
+ object->IsByteArray());
// The object itself should look OK.
object->ObjectVerify();
@@ -3239,7 +3247,7 @@ void Page::Print() {
unsigned mark_size = 0;
for (HeapObject* object = objects.Next(); object != NULL;
object = objects.Next()) {
- bool is_marked = Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object));
+ bool is_marked = ObjectMarking::IsBlackOrGrey(object);
PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
if (is_marked) {
mark_size += object->Size();
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 48551fa264..4bb2db69d0 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -339,7 +339,7 @@ class MemoryChunk {
+ kPointerSize // TypedSlotSet* typed_old_to_old_slots_
+ kPointerSize // SkipList* skip_list_
+ kPointerSize // AtomicValue high_water_mark_
- + kPointerSize // base::Mutex* mutex_
+ + kPointerSize // base::RecursiveMutex* mutex_
+ kPointerSize // base::AtomicWord concurrent_sweeping_
+ 2 * kSizetSize // AtomicNumber free-list statistics
+ kPointerSize // AtomicValue next_chunk_
@@ -397,7 +397,7 @@ class MemoryChunk {
Address address() { return reinterpret_cast<Address>(this); }
- base::Mutex* mutex() { return mutex_; }
+ base::RecursiveMutex* mutex() { return mutex_; }
bool Contains(Address addr) {
return addr >= area_start() && addr < area_end();
@@ -626,7 +626,7 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page.
base::AtomicValue<intptr_t> high_water_mark_;
- base::Mutex* mutex_;
+ base::RecursiveMutex* mutex_;
base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
@@ -1108,7 +1108,10 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
explicit Unmapper(MemoryAllocator* allocator)
: allocator_(allocator),
pending_unmapping_tasks_semaphore_(0),
- concurrent_unmapping_tasks_active_(0) {}
+ concurrent_unmapping_tasks_active_(0) {
+ chunks_[kRegular].reserve(kReservedQueueingSlots);
+ chunks_[kPooled].reserve(kReservedQueueingSlots);
+ }
void AddMemoryChunkSafe(MemoryChunk* chunk) {
if ((chunk->size() == Page::kPageSize) &&
@@ -1141,6 +1144,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
void TearDown();
private:
+ static const int kReservedQueueingSlots = 64;
+
enum ChunkQueueType {
kRegular, // Pages of kPageSize that do not live in a CodeRange and
// can thus be used for stealing.
@@ -1169,8 +1174,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
MemoryChunk* GetMemoryChunkSafe() {
base::LockGuard<base::Mutex> guard(&mutex_);
if (chunks_[type].empty()) return nullptr;
- MemoryChunk* chunk = chunks_[type].front();
- chunks_[type].pop_front();
+ MemoryChunk* chunk = chunks_[type].back();
+ chunks_[type].pop_back();
return chunk;
}
@@ -1180,7 +1185,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
base::Mutex mutex_;
MemoryAllocator* allocator_;
- std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+ std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
// Delayed chunks cannot be processed in the current unmapping cycle because
// of dependencies such as an active sweeper.
// See MemoryAllocator::CanFreeMemoryChunk.
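Switching chunks_ from std::list to std::vector, reserving capacity up front and popping from the back, turns each queue into a contiguous LIFO stack: no per-node allocation, and GetMemoryChunkSafe now hands back the most recently queued chunk. A standalone sketch of the pattern (ChunkToy and ChunkStack are illustrative names):

#include <cstdio>
#include <vector>

struct ChunkToy { int id; };

class ChunkStack {
 public:
  // Mirrors the Unmapper constructor: reserve up front so early pushes
  // don't reallocate (64 matches kReservedQueueingSlots above).
  ChunkStack() { chunks_.reserve(64); }
  void Add(ChunkToy* c) { chunks_.push_back(c); }
  ChunkToy* Get() {
    if (chunks_.empty()) return nullptr;
    ChunkToy* c = chunks_.back();  // LIFO, unlike the old list::front()
    chunks_.pop_back();
    return c;
  }
 private:
  std::vector<ChunkToy*> chunks_;
};

int main() {
  ChunkToy a{1}, b{2};
  ChunkStack stack;
  stack.Add(&a);
  stack.Add(&b);
  std::printf("got chunk %d first\n", stack.Get()->id);  // prints 2
}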
@@ -2456,34 +2461,24 @@ class NewSpace : public Space {
}
size_t AllocatedSinceLastGC() {
- bool seen_age_mark = false;
- Address age_mark = to_space_.age_mark();
- Page* current_page = to_space_.first_page();
- Page* age_mark_page = Page::FromAddress(age_mark);
- Page* last_page = Page::FromAddress(top() - kPointerSize);
- if (age_mark_page == last_page) {
- if (top() - age_mark >= 0) {
- return top() - age_mark;
- }
- // Top was reset at some point, invalidating this metric.
- return 0;
- }
- while (current_page != last_page) {
- if (current_page == age_mark_page) {
- seen_age_mark = true;
- break;
- }
+ const Address age_mark = to_space_.age_mark();
+ DCHECK_NOT_NULL(age_mark);
+ DCHECK_NOT_NULL(top());
+ Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
+ Page* const last_page = Page::FromAllocationAreaAddress(top());
+ Page* current_page = age_mark_page;
+ size_t allocated = 0;
+ if (current_page != last_page) {
+ DCHECK_EQ(current_page, age_mark_page);
+ DCHECK_GE(age_mark_page->area_end(), age_mark);
+ allocated += age_mark_page->area_end() - age_mark;
current_page = current_page->next_page();
+ } else {
+ DCHECK_GE(top(), age_mark);
+ return top() - age_mark;
}
- if (!seen_age_mark) {
- // Top was reset at some point, invalidating this metric.
- return 0;
- }
- DCHECK_GE(age_mark_page->area_end(), age_mark);
- size_t allocated = age_mark_page->area_end() - age_mark;
- DCHECK_EQ(current_page, age_mark_page);
- current_page = age_mark_page->next_page();
while (current_page != last_page) {
+ DCHECK_NE(current_page, age_mark_page);
allocated += Page::kAllocatableMemory;
current_page = current_page->next_page();
}
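The rewritten AllocatedSinceLastGC walks from the page holding the age mark to the page holding top(): same page means just top() minus age mark; otherwise the first page contributes its remaining area and every full page in between contributes kAllocatableMemory. The final page's own contribution falls outside the visible hunk but follows from the loop structure. A worked arithmetic sketch under assumed page sizes (the numbers are invented for illustration):

#include <cstdio>

// Hypothetical layout: three pages with 512 KB of allocatable area each;
// the age mark sits 100 KB into page 0 and top sits 200 KB into page 2.
int main() {
  const long kAllocatable = 512 * 1024;
  const long first = kAllocatable - 100 * 1024;  // rest of the age-mark page
  const long middle = 1 * kAllocatable;          // one full page in between
  const long last = 200 * 1024;                  // used part of the top page
  std::printf("allocated since last GC: %ld bytes\n", first + middle + last);
}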
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 94a8ca81b7..8eb943f3ea 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -105,7 +105,6 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
DCHECK_LT(index, kStoreBuffers);
for (Address* current = start_[index]; current < lazy_top_[index];
current++) {
- DCHECK(!heap_->code_space()->Contains(*current));
Address addr = *current;
Page* page = Page::FromAnyPointerAddress(heap_, addr);
if (IsDeletionAddress(addr)) {
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index be46cb3242..0ade9e020f 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -212,8 +212,8 @@ class StoreBuffer {
// Callbacks are more efficient than reading out the gc state for every
// store buffer operation.
- std::function<void(StoreBuffer*, Address)> insertion_callback;
- std::function<void(StoreBuffer*, Address, Address)> deletion_callback;
+ void (*insertion_callback)(StoreBuffer*, Address);
+ void (*deletion_callback)(StoreBuffer*, Address, Address);
};
} // namespace internal
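Replacing the std::function members with plain function pointers drops the type-erased wrapper, with its possible heap allocation and extra indirection, for callbacks that are always stateless free functions. A sketch contrasting the two forms (StoreBufferToy is a toy type, not V8's StoreBuffer):

#include <cstdio>
#include <functional>

struct StoreBufferToy;
using InsertFn = void (*)(StoreBufferToy*, long);

struct StoreBufferToy {
  // After the diff: a raw pointer, one indirect call, no allocation.
  InsertFn insertion_callback = nullptr;
  // Before the diff: std::function can wrap the same function but adds
  // type erasure and may allocate.
  std::function<void(StoreBufferToy*, long)> old_style;
};

static void InsertDuringGC(StoreBufferToy*, long addr) {
  std::printf("insert 0x%lx\n", addr);
}

int main() {
  StoreBufferToy sb;
  sb.insertion_callback = &InsertDuringGC;
  sb.old_style = &InsertDuringGC;
  sb.insertion_callback(&sb, 0x1000);
  sb.old_style(&sb, 0x2000);
}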
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index d2245ef34a..7c22871ff5 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -30,8 +30,13 @@
#include "unicode/ucol.h"
#include "unicode/ucurr.h"
#include "unicode/unum.h"
+#include "unicode/uvernum.h"
#include "unicode/uversion.h"
+#if U_ICU_VERSION_MAJOR_NUM >= 59
+#include "unicode/char16ptr.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -270,8 +275,13 @@ icu::DecimalFormat* CreateICUNumberFormat(
}
UErrorCode status_digits = U_ZERO_ERROR;
+#if U_ICU_VERSION_MAJOR_NUM >= 59
uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
- currency.getTerminatedBuffer(), &status_digits);
+ icu::toUCharPtr(currency.getTerminatedBuffer()), &status_digits);
+#else
+ uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
+ currency.getTerminatedBuffer(), &status_digits);
+#endif
if (U_SUCCESS(status_digits)) {
number_format->setMinimumFractionDigits(fraction_digits);
number_format->setMaximumFractionDigits(fraction_digits);
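The uvernum.h/char16ptr.h changes above are the usual way to stay source-compatible across an ICU API break: gate the new call shape on U_ICU_VERSION_MAJOR_NUM and keep the old one for earlier releases. A generic, self-contained sketch of that guard; the macro and ToApiPtr are stand-ins, not ICU's actual API:

#include <cstdio>

// Stand-in for unicode/uvernum.h; flip to 58 to exercise the old branch.
#define TOY_ICU_MAJOR 59

#if TOY_ICU_MAJOR >= 59
// Newer releases want an explicit pointer-adapting call, in the spirit of
// icu::toUCharPtr above.
static const char16_t* ToApiPtr(const char16_t* p) { return p; }
#endif

static void ApiCall(const char16_t* s) { std::printf("first unit %d\n", (int)s[0]); }

int main() {
  const char16_t buf[] = {3, u'U', u'S', u'D'};
#if TOY_ICU_MAJOR >= 59
  ApiCall(ToApiPtr(buf));
#else
  ApiCall(buf);
#endif
}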
diff --git a/deps/v8/src/i18n.h b/deps/v8/src/i18n.h
index a87ac97663..f89d005b00 100644
--- a/deps/v8/src/i18n.h
+++ b/deps/v8/src/i18n.h
@@ -6,7 +6,6 @@
#ifndef V8_I18N_H_
#define V8_I18N_H_
-#include "src/handles.h"
#include "src/objects.h"
#include "unicode/uversion.h"
@@ -20,6 +19,9 @@ class SimpleDateFormat;
namespace v8 {
namespace internal {
+template <typename T>
+class Handle;
+
class DateFormat {
public:
// Create a formatter for the specified locale and options. Returns the
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 281c3ef932..de5fc6b53e 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -41,6 +41,7 @@
#include "src/assembler.h"
#include "src/debug/debug.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -446,6 +447,17 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
+Address Assembler::target_address_at(Address pc, Code* code) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(isolate, pc, constant_pool, target);
+}
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 021177478d..9bbf6f4cc4 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -835,7 +835,7 @@ void Assembler::cmpb(Register reg, const Operand& op) {
void Assembler::cmpw(const Operand& op, Immediate imm16) {
- DCHECK(imm16.is_int16());
+ DCHECK(imm16.is_int16() || imm16.is_uint16());
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x81);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index ddee696162..a4bc98d114 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -148,6 +148,7 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
const Register no_reg = {Register::kCode_no_reg};
static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
struct XMMRegister {
enum Code {
@@ -498,16 +499,10 @@ class Assembler : public AssemblerBase {
inline static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static inline Address target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- return target_address_at(pc, constant_pool);
- }
+ static inline Address target_address_at(Address pc, Code* code);
static inline void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(isolate, pc, constant_pool, target);
- }
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
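Moving the Code*-taking overloads out of the class body into assembler-ia32-inl.h breaks an include cycle: their bodies need Code::constant_pool(), which the main header cannot see without pulling in heavier object headers. The general shape, declare in the light header and define in an -inl.h that includes the full type, sketched as one self-contained file (names are illustrative):

// foo.h (sketch): declaration only; a forward declaration keeps it light.
struct Code;
struct AssemblerToy {
  static inline const char* target_address_at(const Code* code);
};

// foo-inl.h (sketch): the definition may now use the complete Code type.
#include <cstdio>
struct Code {
  const char* constant_pool() const { return pool_; }
  const char* pool_ = "pool";
};
inline const char* AssemblerToy::target_address_at(const Code* code) {
  return code ? code->constant_pool() : nullptr;
}

int main() {
  Code c;
  std::printf("%s\n", AssemblerToy::target_address_at(&c));
}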
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index bfe44534ab..1320d90e8b 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -462,57 +462,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver = LoadDescriptor::ReceiverRegister();
- // With careful management, we won't have to save slot and vector on
- // the stack. Simply handle the possibly missing case first.
- // TODO(mvstanton): this code can be more efficient.
- __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(isolate()->factory()->the_hole_value()));
- __ j(equal, &miss);
- __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
- __ ret(0);
-
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
- // Return address is on the stack.
- Label miss;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register index = LoadDescriptor::NameRegister();
- Register scratch = edi;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- Register result = eax;
- DCHECK(!result.is(scratch));
- DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
- result.is(LoadDescriptor::SlotRegister()));
-
- // StringCharAtGenerator doesn't use the result register until it's passed
- // the different miss possibilities. If it did, we would have a conflict
- // when FLAG_vector_ics is true.
- StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- RECEIVER_IS_STRING);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -604,7 +553,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (8) Is the external string one byte? If yes, go to (5).
// (9) Two byte sequential. Load regexp code for two byte. Go to (E).
// (10) Short external string or not a string? If yes, bail out to runtime.
- // (11) Sliced string. Replace subject with parent. Go to (1).
+ // (11) Sliced or thin string. Replace subject with parent. Go to (1).
Label seq_one_byte_string /* 5 */, seq_two_byte_string /* 9 */,
external_string /* 7 */, check_underlying /* 1 */,
@@ -634,6 +583,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// have already been covered.
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(ebx, Immediate(kExternalStringTag));
@@ -912,11 +862,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
__ j(not_zero, &runtime);
- // (11) Sliced string. Replace subject with parent. Go to (1).
+ // (11) Sliced or thin string. Replace subject with parent. Go to (1).
+ Label thin_string;
+ __ cmp(ebx, Immediate(kThinStringTag));
+ __ j(equal, &thin_string, Label::kNear);
// Load offset into edi and replace subject string with parent.
__ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
__ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
__ jmp(&check_underlying); // Go to (1).
+
+ __ bind(&thin_string);
+ __ mov(eax, FieldOperand(eax, ThinString::kActualOffset));
+ __ jmp(&check_underlying); // Go to (1).
#endif // V8_INTERPRETED_REGEXP
}
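Step (11) now peels both indirections: a sliced string is replaced by its parent (accumulating the slice offset), a thin string by its actual target, and control returns to (1) until a directly scannable sequential or external string remains. A toy C++ version of that unwrapping loop (the Kind tags and Str type are hypothetical):

#include <cstdio>

enum Kind { kSeq, kSliced, kThin };

struct Str {
  Kind kind;
  Str* target = nullptr;  // parent (sliced) or actual (thin)
  int offset = 0;         // only meaningful for sliced strings
};

// Mirrors the (11) -> (1) loop: keep replacing the subject with its
// underlying string until we reach something directly scannable.
Str* Unwrap(Str* subject, int* offset_out) {
  while (subject->kind != kSeq) {
    if (subject->kind == kSliced) *offset_out += subject->offset;
    subject = subject->target;
  }
  return subject;
}

int main() {
  Str seq{kSeq};
  Str thin{kThin, &seq};
  Str sliced{kSliced, &thin, 5};
  int offset = 0;
  Str* direct = Unwrap(&sliced, &offset);
  std::printf("kind=%d offset=%d\n", direct->kind, offset);
}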
@@ -1016,9 +973,6 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(ecx, Immediate(SYMBOL_TYPE));
__ j(equal, &runtime_call, Label::kFar);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ cmpb(ecx, Immediate(SIMD128_VALUE_TYPE));
- __ j(equal, &runtime_call, Label::kFar);
}
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -1399,206 +1353,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
- Register slot) {
- __ add(FieldOperand(feedback_vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
- // eax - number of arguments
- // edi - function
- // edx - slot id
- // ebx - vector
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ j(not_equal, miss);
-
- // Reload ecx.
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, ebx, edx);
-
- __ mov(ebx, ecx);
- __ mov(edx, edi);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-
- // Unreachable.
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
- // eax - number of arguments
- // edi - function
- // edx - slot id
- // ebx - vector
- Isolate* isolate = masm->isolate();
- Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
- // The checks. First, does edi match the recorded monomorphic target?
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // We don't know that we have a weak cell. We might have a private symbol
- // or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
- // computed, meaning that it can't appear to be a pointer. If the low bit is
- // 0, then hash is computed, but the 0 bit prevents the field from appearing
- // to be a pointer.
- STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
- WeakCell::kValueOffset &&
- WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
- __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
- __ j(not_equal, &extra_checks_or_miss);
-
- // The compare above could have been a SMI/SMI comparison. Guard against this
- // convincing us that we have a monomorphic JSFunction.
- __ JumpIfSmi(edi, &extra_checks_or_miss);
-
- __ bind(&call_function);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, ebx, edx);
-
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
- tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&extra_checks_or_miss);
- Label uninitialized, miss, not_allocation_site;
-
- __ cmp(ecx, Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
- __ j(equal, &call);
-
- // Check if we have an allocation site.
- __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
- Heap::kAllocationSiteMapRootIndex);
- __ j(not_equal, &not_allocation_site);
-
- // We have an allocation site.
- HandleArrayCase(masm, &miss);
-
- __ bind(&not_allocation_site);
-
- // The following cases attempt to handle MISS cases without going to the
- // runtime.
- if (FLAG_trace_ic) {
- __ jmp(&miss);
- }
-
- __ cmp(ecx, Immediate(FeedbackVector::UninitializedSentinel(isolate)));
- __ j(equal, &uninitialized);
-
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(ecx);
- __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &miss);
- __ mov(
- FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
- Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
-
- __ bind(&call);
-
- // Increment the call count for megamorphic function calls.
- IncrementCallCount(masm, ebx, edx);
-
- __ bind(&call_count_incremented);
-
- __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&uninitialized);
-
- // We are going monomorphic, provided we actually have a JSFunction.
- __ JumpIfSmi(edi, &miss);
-
- // Goto miss case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &miss);
-
- // Make sure the function is not the Array() function, which requires special
- // behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ j(equal, &miss);
-
- // Make sure the function belongs to the same native context.
- __ mov(ecx, FieldOperand(edi, JSFunction::kContextOffset));
- __ mov(ecx, ContextOperand(ecx, Context::NATIVE_CONTEXT_INDEX));
- __ cmp(ecx, NativeContextOperand());
- __ j(not_equal, &miss);
-
- // Store the function. Use a stub since we need a frame for allocation.
- // eax - number of arguments
- // ebx - vector
- // edx - slot
- // edi - function
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateWeakCellStub create_stub(isolate);
- __ SmiTag(eax);
- __ push(eax);
- __ push(ebx);
- __ push(edx);
- __ push(edi);
- __ push(esi);
- __ CallStub(&create_stub);
- __ pop(esi);
- __ pop(edi);
- __ pop(edx);
- __ pop(ebx);
- __ pop(eax);
- __ SmiUntag(eax);
- }
-
- __ jmp(&call_function);
-
- // We are here because tracing is on or we encountered a MISS case we can't
- // handle here.
- __ bind(&miss);
- GenerateMiss(masm);
-
- __ jmp(&call_count_incremented);
-
- // Unreachable
- __ int3();
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the number of arguments.
- __ SmiTag(eax);
- __ push(eax);
-
- // Push the function and feedback info.
- __ push(edi);
- __ push(ebx);
- __ push(edx);
-
- // Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss);
-
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
-
- // Restore number of arguments.
- __ pop(eax);
- __ SmiUntag(eax);
-}
-
-
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@@ -1792,8 +1546,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ mov(ebp, esp);
// Push marker in two places.
- int marker = type();
- __ push(Immediate(Smi::FromInt(marker))); // marker
+ StackFrame::Type marker = type();
+ __ push(Immediate(StackFrame::TypeToMarker(marker))); // marker
ExternalReference context_address(Isolate::kContextAddress, isolate());
__ push(Operand::StaticVariable(context_address)); // context
// Save callee-saved registers (C calling conventions).
@@ -1810,10 +1564,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ j(not_equal, &not_outermost_js, Label::kNear);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
- __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ jmp(&invoke, Label::kNear);
__ bind(&not_outermost_js);
- __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -1857,7 +1611,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
__ pop(ebx);
- __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ cmp(ebx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
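JS entry frames now push a plain StackFrame::TypeToMarker(...) value instead of a Smi-tagged frame type, so the exit path compares the popped word against an untagged immediate. A toy illustration of encoding and comparing such a marker; the shift-by-one encoding here is an assumption for the sketch, V8's actual scheme lives in frames.h:

#include <cassert>
#include <cstdint>
#include <cstdio>

enum FrameType { OUTERMOST_JSENTRY_FRAME = 1, INNER_JSENTRY_FRAME = 2 };

// Hypothetical encoding: shift the type so markers cannot be mistaken for
// tagged pointers. Not V8's real TypeToMarker.
constexpr int32_t TypeToMarker(FrameType t) { return t << 1; }

int main() {
  int32_t pushed = TypeToMarker(OUTERMOST_JSENTRY_FRAME);  // what entry pushes
  // The exit path pops the word and compares against the same marker.
  assert(pushed == TypeToMarker(OUTERMOST_JSENTRY_FRAME));
  assert(pushed != TypeToMarker(INNER_JSENTRY_FRAME));
  std::printf("outermost marker = %d\n", pushed);
}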
@@ -1973,52 +1727,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
- __ test(code_, Immediate(kSmiTagMask |
- ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
- __ j(not_zero, &slow_case_);
-
- Factory* factory = masm->isolate()->factory();
- __ Move(result_, Immediate(factory->single_character_string_cache()));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged one byte char code.
- __ mov(result_, FieldOperand(result_,
- code_, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result_, factory->undefined_value());
- __ j(equal, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -2910,13 +2618,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadFeedbackVector(ebx);
- CallICStub stub(isolate(), state());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -3263,559 +2964,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edi : function
- // -- esi : context
- // -- ebp : frame pointer
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertFunction(edi);
-
- // Make edx point to the JavaScript frame.
- __ mov(edx, ebp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
- __ j(equal, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have rest parameters (only possible if we have an
- // arguments adaptor frame below the function frame).
- Label no_rest_parameters;
- __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_rest_parameters, Label::kNear);
-
- // Check if the arguments adaptor frame contains more arguments than
- // specified by the function's internal formal parameter count.
- Label rest_parameters;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sub(eax,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ j(greater, &rest_parameters);
-
- // Return an empty rest parameter array.
- __ bind(&no_rest_parameters);
- {
- // ----------- S t a t e -------------
- // -- esi : context
- // -- esp[0] : return address
- // -----------------------------------
-
- // Allocate an empty rest parameter array.
- Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the rest parameter array in eax.
- __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
- __ mov(FieldOperand(eax, JSArray::kMapOffset), ecx);
- __ mov(ecx, isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
- __ mov(FieldOperand(eax, JSArray::kLengthOffset), Immediate(Smi::kZero));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(Smi::FromInt(JSArray::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- }
- __ jmp(&done_allocate);
- }
-
- __ bind(&rest_parameters);
- {
- // Compute the pointer to the first rest parameter (skipping the receiver).
- __ lea(ebx,
- Operand(ebx, eax, times_half_pointer_size,
- StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
-
- // ----------- S t a t e -------------
- // -- esi : context
- // -- eax : number of rest parameters (tagged)
- // -- ebx : pointer to first rest parameters
- // -- esp[0] : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ lea(ecx, Operand(eax, times_half_pointer_size,
- JSArray::kSize + FixedArray::kHeaderSize));
- __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the elements array in edx.
- __ mov(FieldOperand(edx, FixedArray::kMapOffset),
- isolate()->factory()->fixed_array_map());
- __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
- {
- Label loop, done_loop;
- __ Move(ecx, Smi::kZero);
- __ bind(&loop);
- __ cmp(ecx, eax);
- __ j(equal, &done_loop, Label::kNear);
- __ mov(edi, Operand(ebx, 0 * kPointerSize));
- __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- edi);
- __ sub(ebx, Immediate(1 * kPointerSize));
- __ add(ecx, Immediate(Smi::FromInt(1)));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Setup the rest parameter array in edi.
- __ lea(edi,
- Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
- __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
- __ mov(FieldOperand(edi, JSArray::kMapOffset), ecx);
- __ mov(FieldOperand(edi, JSArray::kPropertiesOffset),
- isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(edi, JSArray::kElementsOffset), edx);
- __ mov(FieldOperand(edi, JSArray::kLengthOffset), eax);
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ mov(eax, edi);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
- __ j(greater, &too_big_for_new_space);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(ecx);
- __ Push(eax);
- __ Push(ebx);
- __ Push(ecx);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ mov(edx, eax);
- __ Pop(ebx);
- __ Pop(eax);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewRestParameter.
- __ bind(&too_big_for_new_space);
- __ PopReturnAddressTo(ecx);
- // We reload the function from the caller frame due to register pressure
- // within this stub. This is the slow path, hence reloading is preferable.
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
- } else {
- __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
- }
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kNewRestParameter);
- }
-}
-
-
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edi : function
- // -- esi : context
- // -- ebp : frame pointer
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertFunction(edi);
-
- // Make ecx point to the JavaScript frame.
- __ mov(ecx, ebp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ mov(ecx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ cmp(edi, Operand(ecx, StandardFrameConstants::kFunctionOffset));
- __ j(equal, &ok);
- __ Abort(kInvalidFrameForFastNewSloppyArgumentsStub);
- __ bind(&ok);
- }
-
- // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ lea(edx, Operand(ecx, ebx, times_half_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
-
- // ebx : number of parameters (tagged)
- // edx : parameters pointer
- // edi : function
- // ecx : JavaScript frame pointer.
- // esp[0] : return address
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(eax, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(eax, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame, Label::kNear);
-
- // No adaptor, parameter count = argument count.
- __ mov(ecx, ebx);
- __ push(ebx);
- __ jmp(&try_allocate, Label::kNear);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ push(ebx);
- __ mov(edx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
-
- // ebx = parameter count (tagged)
- // ecx = argument count (smi-tagged)
- // Compute the mapped parameter count = min(ebx, ecx) in ebx.
- __ cmp(ebx, ecx);
- __ j(less_equal, &try_allocate, Label::kNear);
- __ mov(ebx, ecx);
-
- // Save mapped parameter count and function.
- __ bind(&try_allocate);
- __ push(edi);
- __ push(ebx);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- Label no_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &no_parameter_map, Label::kNear);
- __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
- __ bind(&no_parameter_map);
-
- // 2. Backing store.
- __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(ebx, eax, edi, no_reg, &runtime, NO_ALLOCATION_FLAGS);
-
- // eax = address of new object(s) (tagged)
- // ecx = argument count (smi-tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Get the arguments map from the current native context into edi.
- Label has_mapped_parameters, instantiate;
- __ mov(edi, NativeContextOperand());
- __ mov(ebx, Operand(esp, 0 * kPointerSize));
- __ test(ebx, ebx);
- __ j(not_zero, &has_mapped_parameters, Label::kNear);
- __ mov(
- edi,
- Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
- __ jmp(&instantiate, Label::kNear);
-
- __ bind(&has_mapped_parameters);
- __ mov(edi, Operand(edi, Context::SlotOffset(
- Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
- __ bind(&instantiate);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (smi-tagged)
- // edi = address of arguments map (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Copy the JS object part.
- __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
-
- // Set up the callee in-object property.
- STATIC_ASSERT(JSSloppyArgumentsObject::kCalleeIndex == 1);
- __ mov(edi, Operand(esp, 1 * kPointerSize));
- __ AssertNotSmi(edi);
- __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kCalleeOffset), edi);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(ecx);
- __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kLengthOffset), ecx);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, edi will point there, otherwise to the
- // backing store.
- __ lea(edi, Operand(eax, JSSloppyArgumentsObject::kSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (tagged)
- // edx = address of receiver argument
- // edi = address of parameter map or backing store (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Free two registers.
- __ push(edx);
- __ push(eax);
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &skip_parameter_map);
-
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
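- // The lea adds Smi::FromInt(2) to the smi in ebx: the parameter map length
- // is the mapped count plus two header slots (context and backing store).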
- __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
- __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1.
- // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count.
- // We loop from right to left.
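- // For example, with parameter_count 3 and mapped_parameter_count 2, the two
- // slots get context indices MIN_CONTEXT_SLOTS+2 and MIN_CONTEXT_SLOTS+1.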
- Label parameters_loop, parameters_test;
- __ push(ecx);
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ add(ebx, Operand(esp, 5 * kPointerSize));
- __ sub(ebx, eax);
- __ mov(ecx, isolate()->factory()->the_hole_value());
- __ mov(edx, edi);
- __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
- // eax = loop variable (tagged)
- // ebx = mapping index (tagged)
- // ecx = the hole value
- // edx = address of parameter map (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = argument count (tagged)
- // esp[4] = address of new object (tagged)
- // esp[8] = address of receiver argument
- // esp[12] = mapped parameter count (tagged)
- // esp[16] = function
- // esp[20] = parameter count (tagged)
- __ jmp(&parameters_test, Label::kNear);
-
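- // Each iteration stores the context slot index (ebx, a smi) into the
- // parameter map and the hole into the matching backing-store slot.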
- __ bind(&parameters_loop);
- __ sub(eax, Immediate(Smi::FromInt(1)));
- __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
- __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
- __ add(ebx, Immediate(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ test(eax, eax);
- __ j(not_zero, &parameters_loop, Label::kNear);
- __ pop(ecx);
-
- __ bind(&skip_parameter_map);
-
- // ecx = argument count (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = address of new object (tagged)
- // esp[4] = address of receiver argument
- // esp[8] = mapped parameter count (tagged)
- // esp[12] = function
- // esp[16] = parameter count (tagged)
- // Copy arguments header and remaining slots (if there are any).
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
- Label arguments_loop, arguments_test;
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
- __ sub(edx, ebx);
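- // Subtracting the smi twice scales it by kPointerSize (a smi is the value
- // shifted left by one): edx = receiver address - count * kPointerSize.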
- __ jmp(&arguments_test, Label::kNear);
-
- __ bind(&arguments_loop);
- __ sub(edx, Immediate(kPointerSize));
- __ mov(eax, Operand(edx, 0));
- __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
- __ add(ebx, Immediate(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ cmp(ebx, ecx);
- __ j(less, &arguments_loop, Label::kNear);
-
- // Restore.
- __ pop(eax); // Address of arguments object.
- __ Drop(4);
-
- // Return.
- __ ret(0);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ pop(eax); // Remove saved mapped parameter count.
- __ pop(edi); // Pop saved function.
- __ pop(eax); // Remove saved parameter count.
- __ pop(eax); // Pop return address.
- __ push(edi); // Push function.
- __ push(edx); // Push parameters pointer.
- __ push(ecx); // Push parameter count.
- __ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edi : function
- // -- esi : context
- // -- ebp : frame pointer
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertFunction(edi);
-
- // Make edx point to the JavaScript frame.
- __ mov(edx, ebp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
- __ j(equal, &ok);
- __ Abort(kInvalidFrameForFastNewStrictArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &arguments_adaptor, Label::kNear);
- {
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax,
- FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
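- // eax is a smi, so times_half_pointer_size scales it to
- // count * kPointerSize; ebx ends up pointing at the first argument.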
- __ lea(ebx,
- Operand(edx, eax, times_half_pointer_size,
- StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
- }
- __ jmp(&arguments_done, Label::kNear);
- __ bind(&arguments_adaptor);
- {
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(ebx,
- Operand(ebx, eax, times_half_pointer_size,
- StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
- }
- __ bind(&arguments_done);
-
- // ----------- S t a t e -------------
- // -- eax : number of arguments (tagged)
- // -- ebx : pointer to the first argument
- // -- esi : context
- // -- esp[0] : return address
- // -----------------------------------
-
- // Allocate space for the strict arguments object plus the backing store.
- Label allocate, done_allocate;
- __ lea(ecx,
- Operand(eax, times_half_pointer_size,
- JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Set up the elements array in edx.
- __ mov(FieldOperand(edx, FixedArray::kMapOffset),
- isolate()->factory()->fixed_array_map());
- __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
- {
- Label loop, done_loop;
- __ Move(ecx, Smi::kZero);
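- // ebx walks down from the first argument while ecx, a smi, indexes the
- // elements array upward from 0.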
- __ bind(&loop);
- __ cmp(ecx, eax);
- __ j(equal, &done_loop, Label::kNear);
- __ mov(edi, Operand(ebx, 0 * kPointerSize));
- __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- edi);
- __ sub(ebx, Immediate(1 * kPointerSize));
- __ add(ecx, Immediate(Smi::FromInt(1)));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Set up the strict arguments object in edi.
- __ lea(edi,
- Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
- __ LoadGlobalFunction(Context::STRICT_ARGUMENTS_MAP_INDEX, ecx);
- __ mov(FieldOperand(edi, JSStrictArgumentsObject::kMapOffset), ecx);
- __ mov(FieldOperand(edi, JSStrictArgumentsObject::kPropertiesOffset),
- isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(edi, JSStrictArgumentsObject::kElementsOffset), edx);
- __ mov(FieldOperand(edi, JSStrictArgumentsObject::kLengthOffset), eax);
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
- __ mov(eax, edi);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
- __ j(greater, &too_big_for_new_space);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(ecx);
- __ Push(eax);
- __ Push(ebx);
- __ Push(ecx);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ mov(edx, eax);
- __ Pop(ebx);
- __ Pop(eax);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ bind(&too_big_for_new_space);
- __ PopReturnAddressTo(ecx);
- // We reload the function from the caller frame due to register pressure
- // within this stub. This is the slow path, hence reloading is preferable.
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
- } else {
- __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
- }
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
// Generates an Operand for saving parameters after PrepareCallApiFunction.
static Operand ApiParameterOperand(int index) {
return Operand(esp, index * kPointerSize);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index ccd159e299..fd7b9cac2c 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -491,6 +491,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register index,
Register result,
Label* call_runtime) {
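+ // The label is bound at the top so the slice/thin/cons cases below can
+ // loop back and re-dispatch on the unwrapped string's instance type.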
+ Label indirect_string_loaded;
+ __ bind(&indirect_string_loaded);
+
// Fetch the instance type of the receiver into result register.
__ mov(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
@@ -501,17 +504,24 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ j(zero, &check_sequential, Label::kNear);
// Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ test(result, Immediate(kSlicedNotConsMask));
- __ j(zero, &cons_string, Label::kNear);
+ Label cons_string, thin_string;
+ __ and_(result, Immediate(kStringRepresentationMask));
+ __ cmp(result, Immediate(kConsStringTag));
+ __ j(equal, &cons_string, Label::kNear);
+ __ cmp(result, Immediate(kThinStringTag));
+ __ j(equal, &thin_string, Label::kNear);
// Handle slices.
- Label indirect_string_loaded;
__ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
__ SmiUntag(result);
__ add(index, result);
__ mov(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded, Label::kNear);
+ __ jmp(&indirect_string_loaded);
+
+ // Handle thin strings.
+ __ bind(&thin_string);
+ __ mov(string, FieldOperand(string, ThinString::kActualOffset));
+ __ jmp(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
@@ -523,10 +533,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Immediate(factory->empty_string()));
__ j(not_equal, call_runtime);
__ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ __ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 7410a46a61..da4d2e8a01 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -181,7 +181,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
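+ // Float64 carries the raw bit pattern, so NaN payloads survive the copy
+ // instead of being canonicalized by a plain C++ double move.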
+ Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index cef6449ca0..0264af918d 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -69,27 +69,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
@@ -141,15 +120,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {edi, edx};
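+ // edi: function, eax: number of arguments, edx: slot id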
+ Register registers[] = {edi, eax, edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, eax, edx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -178,6 +155,13 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // ecx : start index (to support rest parameters)
+ // edi : the target to call
+ Register registers[] = {edi, ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -213,13 +197,12 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Descriptor::InitializePlatformSpecific( \
- CallInterfaceDescriptorData* data) { \
- data->InitializePlatformSpecific(0, nullptr, nullptr); \
- }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+ Register registers[] = {edi, edx, eax, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -412,6 +395,14 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ ebx, // loaded new FP
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 30960efd53..906c369172 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -673,12 +673,14 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
bind(&done);
}
-void MacroAssembler::DebugBreak() {
- Move(eax, Immediate(0));
- mov(ebx, Immediate(ExternalReference(Runtime::kHandleDebuggerStatement,
- isolate())));
- CEntryStub ces(isolate(), 1);
- call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+ // Check whether we need to drop frames to restart a function on the stack.
+ ExternalReference restart_fp =
+ ExternalReference::debug_restart_fp_address(isolate());
+ mov(ebx, Operand::StaticVariable(restart_fp));
+ test(ebx, ebx);
+ j(not_zero, isolate()->builtins()->FrameDropperTrampoline(),
+ RelocInfo::CODE_TARGET);
}
void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
@@ -997,7 +999,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
void MacroAssembler::StubPrologue(StackFrame::Type type) {
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
- push(Immediate(Smi::FromInt(type)));
+ push(Immediate(StackFrame::TypeToMarker(type)));
}
void MacroAssembler::Prologue(bool code_pre_aging) {
@@ -1018,8 +1020,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
- mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+ mov(vector, FieldOperand(vector, JSFunction::kFeedbackVectorOffset));
+ mov(vector, FieldOperand(vector, Cell::kValueOffset));
}
@@ -1033,7 +1035,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
- push(Immediate(Smi::FromInt(type)));
+ push(Immediate(StackFrame::TypeToMarker(type)));
if (type == StackFrame::INTERNAL) {
push(Immediate(CodeObject()));
}
@@ -1047,7 +1049,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(type)));
+ Immediate(StackFrame::TypeToMarker(type)));
Check(equal, kStackFrameTypesMustMatch);
}
leave();
@@ -1082,7 +1084,7 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
mov(ebp, esp);
// Reserve room for entry stack pointer and push the code object.
- push(Immediate(Smi::FromInt(frame_type)));
+ push(Immediate(StackFrame::TypeToMarker(frame_type)));
DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
push(Immediate(0)); // Saved entry sp, patched before call.
DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
@@ -1680,32 +1682,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss) {
- // Get the prototype or initial map from the function.
- mov(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- cmp(result, Immediate(isolate()->factory()->the_hole_value()));
- j(equal, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CmpObjectType(result, MAP_TYPE, scratch);
- j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- mov(result, FieldOperand(result, Map::kPrototypeOffset));
-
- // All done.
- bind(&done);
-}
-
-
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
@@ -1915,6 +1891,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
DCHECK(actual.reg().is(eax));
DCHECK(expected.reg().is(ebx));
} else {
+ definitely_matches = true;
Move(eax, actual.reg());
}
}
@@ -2563,11 +2540,13 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
// Interleave bits from both instance types and compare them in one check.
- DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
+ const int kShift = 8;
+ DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
and_(scratch1, kFlatOneByteStringMask);
and_(scratch2, kFlatOneByteStringMask);
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
+ shl(scratch2, kShift);
+ or_(scratch1, scratch2);
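+ // scratch1 now holds both instance types side by side; equality with the
+ // doubled tag below means both strings are sequential one-byte.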
+ cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << kShift));
j(not_equal, failure);
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 1c329de2bd..8aa7d38073 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -228,10 +228,8 @@ class MacroAssembler: public Assembler {
void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
Register scratch2, SaveFPRegsMode save_fp);
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
+ // Frame restart support
+ void MaybeDropFrames();
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
@@ -476,7 +474,12 @@ class MacroAssembler: public Assembler {
test(value, Immediate(kSmiTagMask));
j(not_zero, not_smi_label, distance);
}
-
+ // Jump if the operand is not a smi.
+ inline void JumpIfNotSmi(Operand value, Label* not_smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(not_zero, not_smi_label, distance);
+ }
// Jump if the value cannot be represented by a smi.
inline void JumpIfNotValidSmiValue(Register value, Register scratch,
Label* on_invalid,
@@ -642,14 +645,6 @@ class MacroAssembler: public Assembler {
// |temp| holds |result|'s map when done.
void GetMapConstructor(Register result, Register map, Register temp);
- // Tries to get the function prototype of a function and puts the value in
- // the result register. Checks that the function really is a function and
- // jumps to the miss label if the fast checks fail. The function register
- // is left untouched; the other registers may be clobbered.
- void TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss);
-
// ---------------------------------------------------------------------------
// Runtime calls
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index d92f9c0c53..d210ea8c71 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/ic/access-compiler.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ic/accessor-assembler-impl.h b/deps/v8/src/ic/accessor-assembler-impl.h
deleted file mode 100644
index 1699b5c855..0000000000
--- a/deps/v8/src/ic/accessor-assembler-impl.h
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SRC_IC_ACCESSOR_ASSEMBLER_IMPL_H_
-#define V8_SRC_IC_ACCESSOR_ASSEMBLER_IMPL_H_
-
-#include "src/code-stub-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-namespace compiler {
-class CodeAssemblerState;
-}
-
-using compiler::Node;
-
-#define ACCESSOR_ASSEMBLER_PUBLIC_INTERFACE(V) \
- V(LoadIC) \
- V(LoadField) \
- V(LoadICTrampoline) \
- V(KeyedLoadICTF) \
- V(KeyedLoadICTrampolineTF) \
- V(KeyedLoadICMegamorphic) \
- V(StoreIC) \
- V(StoreICTrampoline)
-// The other IC entry points need custom handling because of additional
-// parameters like "typeof_mode" or "language_mode".
-
-class AccessorAssemblerImpl : public CodeStubAssembler {
- public:
- explicit AccessorAssemblerImpl(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
-#define DECLARE_PUBLIC_METHOD(Name) void Generate##Name();
-
- ACCESSOR_ASSEMBLER_PUBLIC_INTERFACE(DECLARE_PUBLIC_METHOD)
-#undef DECLARE_PUBLIC_METHOD
-
- void GenerateLoadICProtoArray(bool throw_reference_error_if_nonexistent);
-
- void GenerateLoadGlobalIC(TypeofMode typeof_mode);
- void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
-
- void GenerateKeyedStoreICTF(LanguageMode language_mode);
- void GenerateKeyedStoreICTrampolineTF(LanguageMode language_mode);
-
- void TryProbeStubCache(StubCache* stub_cache, Node* receiver, Node* name,
- Label* if_handler, Variable* var_handler,
- Label* if_miss);
-
- Node* StubCachePrimaryOffsetForTesting(Node* name, Node* map) {
- return StubCachePrimaryOffset(name, map);
- }
- Node* StubCacheSecondaryOffsetForTesting(Node* name, Node* map) {
- return StubCacheSecondaryOffset(name, map);
- }
-
- protected:
- struct LoadICParameters {
- LoadICParameters(Node* context, Node* receiver, Node* name, Node* slot,
- Node* vector)
- : context(context),
- receiver(receiver),
- name(name),
- slot(slot),
- vector(vector) {}
-
- Node* context;
- Node* receiver;
- Node* name;
- Node* slot;
- Node* vector;
- };
-
- struct StoreICParameters : public LoadICParameters {
- StoreICParameters(Node* context, Node* receiver, Node* name, Node* value,
- Node* slot, Node* vector)
- : LoadICParameters(context, receiver, name, slot, vector),
- value(value) {}
- Node* value;
- };
-
- enum ElementSupport { kOnlyProperties, kSupportElements };
- void HandleStoreICHandlerCase(
- const StoreICParameters* p, Node* handler, Label* miss,
- ElementSupport support_elements = kOnlyProperties);
-
- private:
- // Stub generation entry points.
-
- void LoadIC(const LoadICParameters* p);
- void LoadICProtoArray(const LoadICParameters* p, Node* handler,
- bool throw_reference_error_if_nonexistent);
- void LoadGlobalIC(const LoadICParameters* p, TypeofMode typeof_mode);
- void KeyedLoadIC(const LoadICParameters* p);
- void KeyedLoadICGeneric(const LoadICParameters* p);
- void StoreIC(const StoreICParameters* p);
- void KeyedStoreIC(const StoreICParameters* p, LanguageMode language_mode);
-
- // IC dispatcher behavior.
-
- // Checks monomorphic case. Returns {feedback} entry of the vector.
- Node* TryMonomorphicCase(Node* slot, Node* vector, Node* receiver_map,
- Label* if_handler, Variable* var_handler,
- Label* if_miss);
- void HandlePolymorphicCase(Node* receiver_map, Node* feedback,
- Label* if_handler, Variable* var_handler,
- Label* if_miss, int unroll_count);
- void HandleKeyedStorePolymorphicCase(Node* receiver_map, Node* feedback,
- Label* if_handler, Variable* var_handler,
- Label* if_transition_handler,
- Variable* var_transition_map_cell,
- Label* if_miss);
-
- // LoadIC implementation.
-
- void HandleLoadICHandlerCase(
- const LoadICParameters* p, Node* handler, Label* miss,
- ElementSupport support_elements = kOnlyProperties);
-
- void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
- Node* smi_handler, Label* miss,
- ElementSupport support_elements);
-
- void HandleLoadICProtoHandlerCase(const LoadICParameters* p, Node* handler,
- Variable* var_holder,
- Variable* var_smi_handler,
- Label* if_smi_handler, Label* miss,
- bool throw_reference_error_if_nonexistent);
-
- Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p, Node* handler,
- Node* handler_length, Node* handler_flags,
- Label* miss,
- bool throw_reference_error_if_nonexistent);
-
- // LoadGlobalIC implementation.
-
- void HandleLoadGlobalICHandlerCase(const LoadICParameters* p, Node* handler,
- Label* miss,
- bool throw_reference_error_if_nonexistent);
-
- // StoreIC implementation.
-
- void HandleStoreICElementHandlerCase(const StoreICParameters* p,
- Node* handler, Label* miss);
-
- void HandleStoreICProtoHandler(const StoreICParameters* p, Node* handler,
- Label* miss);
- // If |transition| is nullptr, a normal field store is generated;
- // otherwise, a transitioning store is emitted.
- void HandleStoreICSmiHandlerCase(Node* handler_word, Node* holder,
- Node* value, Node* transition, Label* miss);
- // If |transition| is nullptr, a normal field store is generated;
- // otherwise, a transitioning store is emitted.
- void HandleStoreFieldAndReturn(Node* handler_word, Node* holder,
- Representation representation, Node* value,
- Node* transition, Label* miss);
-
- // Low-level helpers.
-
- Node* PrepareValueForStore(Node* handler_word, Node* holder,
- Representation representation, Node* transition,
- Node* value, Label* bailout);
-
- // Extends properties backing store by JSObject::kFieldsAdded elements.
- void ExtendPropertiesBackingStore(Node* object);
-
- void StoreNamedField(Node* handler_word, Node* object, bool is_inobject,
- Representation representation, Node* value,
- bool transition_to_field);
-
- void EmitFastElementsBoundsCheck(Node* object, Node* elements,
- Node* intptr_index,
- Node* is_jsarray_condition, Label* miss);
- void EmitElementLoad(Node* object, Node* elements, Node* elements_kind,
- Node* key, Node* is_jsarray_condition, Label* if_hole,
- Label* rebox_double, Variable* var_double_value,
- Label* unimplemented_elements_kind, Label* out_of_bounds,
- Label* miss);
- void CheckPrototype(Node* prototype_cell, Node* name, Label* miss);
- void NameDictionaryNegativeLookup(Node* object, Node* name, Label* miss);
-
- // Stub cache access helpers.
-
- // This enum is used here as a replacement for StubCache::Table to avoid
- // including the stub cache header.
- enum StubCacheTable : int;
-
- Node* StubCachePrimaryOffset(Node* name, Node* map);
- Node* StubCacheSecondaryOffset(Node* name, Node* seed);
-
- void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
- Node* entry_offset, Node* name, Node* map,
- Label* if_handler, Variable* var_handler,
- Label* if_miss);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SRC_IC_ACCESSOR_ASSEMBLER_IMPL_H_
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index c2b2d17950..d3379ab6d2 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -3,25 +3,27 @@
// found in the LICENSE file.
#include "src/ic/accessor-assembler.h"
-#include "src/ic/accessor-assembler-impl.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/ic/handler-configuration.h"
#include "src/ic/stub-cache.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
using compiler::CodeAssemblerState;
+using compiler::Node;
//////////////////// Private helpers.
-Node* AccessorAssemblerImpl::TryMonomorphicCase(Node* slot, Node* vector,
- Node* receiver_map,
- Label* if_handler,
- Variable* var_handler,
- Label* if_miss) {
+Node* AccessorAssembler::TryMonomorphicCase(Node* slot, Node* vector,
+ Node* receiver_map,
+ Label* if_handler,
+ Variable* var_handler,
+ Label* if_miss) {
Comment("TryMonomorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
@@ -51,9 +53,11 @@ Node* AccessorAssemblerImpl::TryMonomorphicCase(Node* slot, Node* vector,
return feedback;
}
-void AccessorAssemblerImpl::HandlePolymorphicCase(
- Node* receiver_map, Node* feedback, Label* if_handler,
- Variable* var_handler, Label* if_miss, int unroll_count) {
+void AccessorAssembler::HandlePolymorphicCase(Node* receiver_map,
+ Node* feedback, Label* if_handler,
+ Variable* var_handler,
+ Label* if_miss,
+ int unroll_count) {
Comment("HandlePolymorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
@@ -78,7 +82,7 @@ void AccessorAssemblerImpl::HandlePolymorphicCase(
Node* init = IntPtrConstant(unroll_count * kEntrySize);
Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
BuildFastLoop(
- MachineType::PointerRepresentation(), init, length,
+ init, length,
[this, receiver_map, feedback, if_handler, var_handler](Node* index) {
Node* cached_map =
LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
@@ -93,12 +97,12 @@ void AccessorAssemblerImpl::HandlePolymorphicCase(
Bind(&next_entry);
},
- kEntrySize, IndexAdvanceMode::kPost);
+ kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
// The loop falls through if no handler was found.
Goto(if_miss);
}
-void AccessorAssemblerImpl::HandleKeyedStorePolymorphicCase(
+void AccessorAssembler::HandleKeyedStorePolymorphicCase(
Node* receiver_map, Node* feedback, Label* if_handler,
Variable* var_handler, Label* if_transition_handler,
Variable* var_transition_map_cell, Label* if_miss) {
@@ -109,7 +113,7 @@ void AccessorAssemblerImpl::HandleKeyedStorePolymorphicCase(
Node* init = IntPtrConstant(0);
Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
- BuildFastLoop(MachineType::PointerRepresentation(), init, length,
+ BuildFastLoop(init, length,
[this, receiver_map, feedback, if_handler, var_handler,
if_transition_handler, var_transition_map_cell](Node* index) {
Node* cached_map =
@@ -130,15 +134,17 @@ void AccessorAssemblerImpl::HandleKeyedStorePolymorphicCase(
Bind(&next_entry);
},
- kEntrySize, IndexAdvanceMode::kPost);
+ kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
// The loop falls through if no handler was found.
Goto(if_miss);
}
-void AccessorAssemblerImpl::HandleLoadICHandlerCase(
+void AccessorAssembler::HandleLoadICHandlerCase(
const LoadICParameters* p, Node* handler, Label* miss,
ElementSupport support_elements) {
Comment("have_handler");
+ ExitPoint direct_exit(this);
+
Variable var_holder(this, MachineRepresentation::kTagged);
var_holder.Bind(p->receiver);
Variable var_smi_handler(this, MachineRepresentation::kTagged);
@@ -155,14 +161,14 @@ void AccessorAssemblerImpl::HandleLoadICHandlerCase(
Bind(&if_smi_handler);
{
HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
- miss, support_elements);
+ miss, &direct_exit, support_elements);
}
Bind(&try_proto_handler);
{
GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
HandleLoadICProtoHandlerCase(p, handler, &var_holder, &var_smi_handler,
- &if_smi_handler, miss, false);
+ &if_smi_handler, miss, &direct_exit, false);
}
Bind(&call_handler);
@@ -173,9 +179,9 @@ void AccessorAssemblerImpl::HandleLoadICHandlerCase(
}
}
-void AccessorAssemblerImpl::HandleLoadICSmiHandlerCase(
+void AccessorAssembler::HandleLoadICSmiHandlerCase(
const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
- ElementSupport support_elements) {
+ ExitPoint* exit_point, ElementSupport support_elements) {
Variable var_double_value(this, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
@@ -183,7 +189,7 @@ void AccessorAssemblerImpl::HandleLoadICSmiHandlerCase(
Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
if (support_elements == kSupportElements) {
Label property(this);
- GotoUnless(
+ GotoIfNot(
WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForElements)),
&property);
@@ -199,7 +205,7 @@ void AccessorAssemblerImpl::HandleLoadICSmiHandlerCase(
EmitElementLoad(holder, elements, elements_kind, intptr_index,
is_jsarray_condition, &if_hole, &rebox_double,
&var_double_value, &unimplemented_elements_kind,
- out_of_bounds, miss);
+ out_of_bounds, miss, exit_point);
Bind(&unimplemented_elements_kind);
{
@@ -212,14 +218,14 @@ void AccessorAssemblerImpl::HandleLoadICSmiHandlerCase(
Bind(&if_hole);
{
Comment("convert hole");
- GotoUnless(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
+ GotoIfNot(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
- GotoUnless(
+ GotoIfNot(
WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
miss);
- Return(UndefinedConstant());
+ exit_point->Return(UndefinedConstant());
}
Bind(&property);
@@ -243,7 +249,7 @@ void AccessorAssemblerImpl::HandleLoadICSmiHandlerCase(
{
Label is_double(this);
GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
- Return(LoadObjectField(holder, offset));
+ exit_point->Return(LoadObjectField(holder, offset));
Bind(&is_double);
if (FLAG_unbox_double_fields) {
@@ -262,7 +268,7 @@ void AccessorAssemblerImpl::HandleLoadICSmiHandlerCase(
Node* properties = LoadProperties(holder);
Node* value = LoadObjectField(properties, offset);
GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
- Return(value);
+ exit_point->Return(value);
Bind(&is_double);
var_double_value.Bind(LoadHeapNumberValue(value));
@@ -270,7 +276,7 @@ void AccessorAssemblerImpl::HandleLoadICSmiHandlerCase(
}
Bind(&rebox_double);
- Return(AllocateHeapNumberWithValue(var_double_value.value()));
+ exit_point->Return(AllocateHeapNumberWithValue(var_double_value.value()));
}
Bind(&constant);
@@ -287,18 +293,19 @@ void AccessorAssemblerImpl::HandleLoadICSmiHandlerCase(
Label if_accessor_info(this);
GotoIf(IsSetWord<LoadHandler::IsAccessorInfoBits>(handler_word),
&if_accessor_info);
- Return(value);
+ exit_point->Return(value);
Bind(&if_accessor_info);
Callable callable = CodeFactory::ApiGetter(isolate());
- TailCallStub(callable, p->context, p->receiver, holder, value);
+ exit_point->ReturnCallStub(callable, p->context, p->receiver, holder,
+ value);
}
}
-void AccessorAssemblerImpl::HandleLoadICProtoHandlerCase(
+void AccessorAssembler::HandleLoadICProtoHandlerCase(
const LoadICParameters* p, Node* handler, Variable* var_holder,
Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
- bool throw_reference_error_if_nonexistent) {
+ ExitPoint* exit_point, bool throw_reference_error_if_nonexistent) {
DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
@@ -327,7 +334,7 @@ void AccessorAssemblerImpl::HandleLoadICProtoHandlerCase(
Node* handler_flags = SmiUntag(smi_handler);
Label check_prototypes(this);
- GotoUnless(
+ GotoIfNot(
IsSetWord<LoadHandler::DoNegativeLookupOnReceiverBits>(handler_flags),
&check_prototypes);
{
@@ -350,9 +357,10 @@ void AccessorAssemblerImpl::HandleLoadICProtoHandlerCase(
GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
// This is a handler for a load of a non-existent value.
if (throw_reference_error_if_nonexistent) {
- TailCallRuntime(Runtime::kThrowReferenceError, p->context, p->name);
+ exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context,
+ p->name);
} else {
- Return(UndefinedConstant());
+ exit_point->Return(UndefinedConstant());
}
Bind(&load_existent);
@@ -368,15 +376,14 @@ void AccessorAssemblerImpl::HandleLoadICProtoHandlerCase(
Bind(&array_handler);
{
- typedef LoadICProtoArrayDescriptor Descriptor;
- LoadICProtoArrayStub stub(isolate(), throw_reference_error_if_nonexistent);
- Node* target = HeapConstant(stub.GetCode());
- TailCallStub(Descriptor(isolate()), target, p->context, p->receiver,
- p->name, p->slot, p->vector, handler);
+ exit_point->ReturnCallStub(
+ CodeFactory::LoadICProtoArray(isolate(),
+ throw_reference_error_if_nonexistent),
+ p->context, p->receiver, p->name, p->slot, p->vector, handler);
}
}
-Node* AccessorAssemblerImpl::EmitLoadICProtoArrayCheck(
+Node* AccessorAssembler::EmitLoadICProtoArrayCheck(
const LoadICParameters* p, Node* handler, Node* handler_length,
Node* handler_flags, Label* miss,
bool throw_reference_error_if_nonexistent) {
@@ -384,8 +391,8 @@ Node* AccessorAssemblerImpl::EmitLoadICProtoArrayCheck(
start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
Label can_access(this);
- GotoUnless(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
- &can_access);
+ GotoIfNot(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
+ &can_access);
{
// Skip this entry of a handler.
start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
@@ -399,7 +406,7 @@ Node* AccessorAssemblerImpl::EmitLoadICProtoArrayCheck(
Node* native_context = LoadNativeContext(p->context);
GotoIf(WordEqual(expected_native_context, native_context), &can_access);
// If the receiver is not a JSGlobalProxy then we miss.
- GotoUnless(IsJSGlobalProxy(p->receiver), miss);
+ GotoIfNot(IsJSGlobalProxy(p->receiver), miss);
// For JSGlobalProxy receiver try to compare security tokens of current
// and expected native contexts.
Node* expected_token = LoadContextElement(expected_native_context,
@@ -410,13 +417,13 @@ Node* AccessorAssemblerImpl::EmitLoadICProtoArrayCheck(
}
Bind(&can_access);
- BuildFastLoop(
- MachineType::PointerRepresentation(), start_index.value(), handler_length,
- [this, p, handler, miss](Node* current) {
- Node* prototype_cell = LoadFixedArrayElement(handler, current);
- CheckPrototype(prototype_cell, p->name, miss);
- },
- 1, IndexAdvanceMode::kPost);
+ BuildFastLoop(start_index.value(), handler_length,
+ [this, p, handler, miss](Node* current) {
+ Node* prototype_cell =
+ LoadFixedArrayElement(handler, current);
+ CheckPrototype(prototype_cell, p->name, miss);
+ },
+ 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
Node* maybe_holder_cell =
LoadFixedArrayElement(handler, LoadHandler::kHolderCellIndex);
@@ -438,9 +445,9 @@ Node* AccessorAssemblerImpl::EmitLoadICProtoArrayCheck(
return holder;
}
-void AccessorAssemblerImpl::HandleLoadGlobalICHandlerCase(
+void AccessorAssembler::HandleLoadGlobalICHandlerCase(
const LoadICParameters* pp, Node* handler, Label* miss,
- bool throw_reference_error_if_nonexistent) {
+ ExitPoint* exit_point, bool throw_reference_error_if_nonexistent) {
LoadICParameters p = *pp;
DCHECK_NULL(p.receiver);
Node* native_context = LoadNativeContext(p.context);
@@ -450,14 +457,14 @@ void AccessorAssemblerImpl::HandleLoadGlobalICHandlerCase(
Variable var_smi_handler(this, MachineRepresentation::kTagged);
Label if_smi_handler(this);
HandleLoadICProtoHandlerCase(&p, handler, &var_holder, &var_smi_handler,
- &if_smi_handler, miss,
+ &if_smi_handler, miss, exit_point,
throw_reference_error_if_nonexistent);
Bind(&if_smi_handler);
HandleLoadICSmiHandlerCase(&p, var_holder.value(), var_smi_handler.value(),
- miss, kOnlyProperties);
+ miss, exit_point, kOnlyProperties);
}
-void AccessorAssemblerImpl::HandleStoreICHandlerCase(
+void AccessorAssembler::HandleStoreICHandlerCase(
const StoreICParameters* p, Node* handler, Label* miss,
ElementSupport support_elements) {
Label if_smi_handler(this), if_nonsmi_handler(this);
@@ -504,7 +511,7 @@ void AccessorAssemblerImpl::HandleStoreICHandlerCase(
}
}
-void AccessorAssemblerImpl::HandleStoreICElementHandlerCase(
+void AccessorAssembler::HandleStoreICElementHandlerCase(
const StoreICParameters* p, Node* handler, Label* miss) {
Comment("HandleStoreICElementHandlerCase");
Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
@@ -521,8 +528,8 @@ void AccessorAssemblerImpl::HandleStoreICElementHandlerCase(
p->value, p->slot, p->vector);
}
-void AccessorAssemblerImpl::HandleStoreICProtoHandler(
- const StoreICParameters* p, Node* handler, Label* miss) {
+void AccessorAssembler::HandleStoreICProtoHandler(const StoreICParameters* p,
+ Node* handler, Label* miss) {
// IC dispatchers rely on these assumptions to be held.
STATIC_ASSERT(FixedArray::kLengthOffset ==
StoreHandler::kTransitionCellOffset);
@@ -564,14 +571,13 @@ void AccessorAssemblerImpl::HandleStoreICProtoHandler(
Bind(&array_handler);
{
Node* length = SmiUntag(maybe_transition_cell);
- BuildFastLoop(MachineType::PointerRepresentation(),
- IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
+ BuildFastLoop(IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
[this, p, handler, miss](Node* current) {
Node* prototype_cell =
LoadFixedArrayElement(handler, current);
CheckPrototype(prototype_cell, p->name, miss);
},
- 1, IndexAdvanceMode::kPost);
+ 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
Node* maybe_transition_cell =
LoadFixedArrayElement(handler, StoreHandler::kTransitionCellIndex);
@@ -613,11 +619,10 @@ void AccessorAssemblerImpl::HandleStoreICProtoHandler(
}
}
-void AccessorAssemblerImpl::HandleStoreICSmiHandlerCase(Node* handler_word,
- Node* holder,
- Node* value,
- Node* transition,
- Label* miss) {
+void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
+ Node* holder, Node* value,
+ Node* transition,
+ Label* miss) {
Comment(transition ? "transitioning field store" : "field store");
#ifdef DEBUG
@@ -631,8 +636,17 @@ void AccessorAssemblerImpl::HandleStoreICSmiHandlerCase(Node* handler_word,
WordEqual(handler_kind,
IntPtrConstant(StoreHandler::kTransitionToConstant))));
} else {
- CSA_ASSERT(this, WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kStoreField)));
+ if (FLAG_track_constant_fields) {
+ CSA_ASSERT(
+ this,
+ Word32Or(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kStoreField)),
+ WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kStoreConstField))));
+ } else {
+ CSA_ASSERT(this, WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kStoreField)));
+ }
}
#endif
@@ -683,9 +697,11 @@ void AccessorAssemblerImpl::HandleStoreICSmiHandlerCase(Node* handler_word,
}
}
-void AccessorAssemblerImpl::HandleStoreFieldAndReturn(
- Node* handler_word, Node* holder, Representation representation,
- Node* value, Node* transition, Label* miss) {
+void AccessorAssembler::HandleStoreFieldAndReturn(Node* handler_word,
+ Node* holder,
+ Representation representation,
+ Node* value, Node* transition,
+ Label* miss) {
bool transition_to_field = transition != nullptr;
Node* prepared_value = PrepareValueForStore(
handler_word, holder, representation, transition, value, miss);
@@ -697,7 +713,7 @@ void AccessorAssemblerImpl::HandleStoreFieldAndReturn(
Bind(&if_inobject);
{
StoreNamedField(handler_word, holder, true, representation, prepared_value,
- transition_to_field);
+ transition_to_field, miss);
if (transition_to_field) {
StoreMap(holder, transition);
}
@@ -708,8 +724,8 @@ void AccessorAssemblerImpl::HandleStoreFieldAndReturn(
{
if (transition_to_field) {
Label storage_extended(this);
- GotoUnless(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
- &storage_extended);
+ GotoIfNot(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
+ &storage_extended);
Comment("[ Extend storage");
ExtendPropertiesBackingStore(holder);
Comment("] Extend storage");
@@ -719,7 +735,7 @@ void AccessorAssemblerImpl::HandleStoreFieldAndReturn(
}
StoreNamedField(handler_word, holder, false, representation, prepared_value,
- transition_to_field);
+ transition_to_field, miss);
if (transition_to_field) {
StoreMap(holder, transition);
}
@@ -727,16 +743,24 @@ void AccessorAssemblerImpl::HandleStoreFieldAndReturn(
}
}
-Node* AccessorAssemblerImpl::PrepareValueForStore(Node* handler_word,
- Node* holder,
- Representation representation,
- Node* transition, Node* value,
- Label* bailout) {
+Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
+ Representation representation,
+ Node* transition, Node* value,
+ Label* bailout) {
if (representation.IsDouble()) {
value = TryTaggedToFloat64(value, bailout);
} else if (representation.IsHeapObject()) {
GotoIf(TaggedIsSmi(value), bailout);
+
+ Label done(this);
+ if (FLAG_track_constant_fields && !transition) {
+ // Skip field type check in favor of constant value check when storing
+ // to constant field.
+ GotoIf(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
+ IntPtrConstant(StoreHandler::kStoreConstField)),
+ &done);
+ }
Node* value_index_in_descriptor =
DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
Node* descriptors =
@@ -744,7 +768,6 @@ Node* AccessorAssemblerImpl::PrepareValueForStore(Node* handler_word,
Node* maybe_field_type =
LoadFixedArrayElement(descriptors, value_index_in_descriptor);
- Label done(this);
GotoIf(TaggedIsSmi(maybe_field_type), &done);
// Check that value type matches the field type.
{
@@ -754,7 +777,7 @@ Node* AccessorAssemblerImpl::PrepareValueForStore(Node* handler_word,
Bind(&done);
} else if (representation.IsSmi()) {
- GotoUnless(TaggedIsSmi(value), bailout);
+ GotoIfNot(TaggedIsSmi(value), bailout);
} else {
DCHECK(representation.IsTagged());
@@ -762,7 +785,7 @@ Node* AccessorAssemblerImpl::PrepareValueForStore(Node* handler_word,
return value;
}
-void AccessorAssemblerImpl::ExtendPropertiesBackingStore(Node* object) {
+void AccessorAssembler::ExtendPropertiesBackingStore(Node* object) {
Node* properties = LoadProperties(object);
Node* length = LoadFixedArrayBaseLength(properties);
@@ -798,11 +821,11 @@ void AccessorAssemblerImpl::ExtendPropertiesBackingStore(Node* object) {
StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
}
-void AccessorAssemblerImpl::StoreNamedField(Node* handler_word, Node* object,
- bool is_inobject,
- Representation representation,
- Node* value,
- bool transition_to_field) {
+void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
+ bool is_inobject,
+ Representation representation,
+ Node* value, bool transition_to_field,
+ Label* bailout) {
bool store_value_as_double = representation.IsDouble();
Node* property_storage = object;
if (!is_inobject) {
@@ -826,6 +849,27 @@ void AccessorAssemblerImpl::StoreNamedField(Node* handler_word, Node* object,
}
}
+ // Do constant value check if necessary.
+ if (FLAG_track_constant_fields && !transition_to_field) {
+ Label done(this);
+ GotoIfNot(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
+ IntPtrConstant(StoreHandler::kStoreConstField)),
+ &done);
+ {
+ if (store_value_as_double) {
+ Node* current_value =
+ LoadObjectField(property_storage, offset, MachineType::Float64());
+ GotoIfNot(Float64Equal(current_value, value), bailout);
+ } else {
+ Node* current_value = LoadObjectField(property_storage, offset);
+ GotoIfNot(WordEqual(current_value, value), bailout);
+ }
+ Goto(&done);
+ }
+ Bind(&done);
+ }
+
+ // Do the store.
if (store_value_as_double) {
StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
MachineRepresentation::kFloat64);
@@ -836,9 +880,11 @@ void AccessorAssemblerImpl::StoreNamedField(Node* handler_word, Node* object,
}
}
-void AccessorAssemblerImpl::EmitFastElementsBoundsCheck(
- Node* object, Node* elements, Node* intptr_index,
- Node* is_jsarray_condition, Label* miss) {
+void AccessorAssembler::EmitFastElementsBoundsCheck(Node* object,
+ Node* elements,
+ Node* intptr_index,
+ Node* is_jsarray_condition,
+ Label* miss) {
Variable var_length(this, MachineType::PointerRepresentation());
Comment("Fast elements bounds check");
Label if_array(this), length_loaded(this, &var_length);
@@ -853,14 +899,14 @@ void AccessorAssemblerImpl::EmitFastElementsBoundsCheck(
Goto(&length_loaded);
}
Bind(&length_loaded);
- GotoUnless(UintPtrLessThan(intptr_index, var_length.value()), miss);
+ GotoIfNot(UintPtrLessThan(intptr_index, var_length.value()), miss);
}
-void AccessorAssemblerImpl::EmitElementLoad(
+void AccessorAssembler::EmitElementLoad(
Node* object, Node* elements, Node* elements_kind, Node* intptr_index,
Node* is_jsarray_condition, Label* if_hole, Label* rebox_double,
Variable* var_double_value, Label* unimplemented_elements_kind,
- Label* out_of_bounds, Label* miss) {
+ Label* out_of_bounds, Label* miss, ExitPoint* exit_point) {
Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
if_dictionary(this);
@@ -892,7 +938,7 @@ void AccessorAssemblerImpl::EmitElementLoad(
Bind(&if_fast_packed);
{
Comment("fast packed elements");
- Return(LoadFixedArrayElement(elements, intptr_index));
+ exit_point->Return(LoadFixedArrayElement(elements, intptr_index));
}
Bind(&if_fast_holey);
@@ -900,7 +946,7 @@ void AccessorAssemblerImpl::EmitElementLoad(
Comment("fast holey elements");
Node* element = LoadFixedArrayElement(elements, intptr_index);
GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
- Return(element);
+ exit_point->Return(element);
}
Bind(&if_fast_double);
@@ -943,16 +989,15 @@ void AccessorAssemblerImpl::EmitElementLoad(
elements, intptr_index, &if_found, &var_entry, if_hole);
Bind(&if_found);
// Check that the value is a data property.
- Node* details_index = EntryToIndex<SeededNumberDictionary>(
- var_entry.value(), SeededNumberDictionary::kEntryDetailsIndex);
- Node* details = SmiToWord32(LoadFixedArrayElement(elements, details_index));
+ Node* index = EntryToIndex<SeededNumberDictionary>(var_entry.value());
+ Node* details =
+ LoadDetailsByKeyIndex<SeededNumberDictionary>(elements, index);
Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
// TODO(jkummerow): Support accessors without missing?
- GotoUnless(Word32Equal(kind, Int32Constant(kData)), miss);
+ GotoIfNot(Word32Equal(kind, Int32Constant(kData)), miss);
// Finally, load the value.
- Node* value_index = EntryToIndex<SeededNumberDictionary>(
- var_entry.value(), SeededNumberDictionary::kEntryValueIndex);
- Return(LoadFixedArrayElement(elements, value_index));
+ exit_point->Return(
+ LoadValueByKeyIndex<SeededNumberDictionary>(elements, index));
}
Bind(&if_typed_array);
@@ -965,7 +1010,7 @@ void AccessorAssemblerImpl::EmitElementLoad(
// Bounds check.
Node* length =
SmiUntag(LoadObjectField(object, JSTypedArray::kLengthOffset));
- GotoUnless(UintPtrLessThan(intptr_index, length), out_of_bounds);
+ GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
// Backing store = external_pointer + base_pointer.
Node* external_pointer =
@@ -998,41 +1043,41 @@ void AccessorAssemblerImpl::EmitElementLoad(
{
Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
Node* element = Load(MachineType::Uint8(), backing_store, intptr_index);
- Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromWord32(element));
}
Bind(&int8_elements);
{
Comment("INT8_ELEMENTS");
Node* element = Load(MachineType::Int8(), backing_store, intptr_index);
- Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromWord32(element));
}
Bind(&uint16_elements);
{
Comment("UINT16_ELEMENTS");
Node* index = WordShl(intptr_index, IntPtrConstant(1));
Node* element = Load(MachineType::Uint16(), backing_store, index);
- Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromWord32(element));
}
Bind(&int16_elements);
{
Comment("INT16_ELEMENTS");
Node* index = WordShl(intptr_index, IntPtrConstant(1));
Node* element = Load(MachineType::Int16(), backing_store, index);
- Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromWord32(element));
}
Bind(&uint32_elements);
{
Comment("UINT32_ELEMENTS");
Node* index = WordShl(intptr_index, IntPtrConstant(2));
Node* element = Load(MachineType::Uint32(), backing_store, index);
- Return(ChangeUint32ToTagged(element));
+ exit_point->Return(ChangeUint32ToTagged(element));
}
Bind(&int32_elements);
{
Comment("INT32_ELEMENTS");
Node* index = WordShl(intptr_index, IntPtrConstant(2));
Node* element = Load(MachineType::Int32(), backing_store, index);
- Return(ChangeInt32ToTagged(element));
+ exit_point->Return(ChangeInt32ToTagged(element));
}
Bind(&float32_elements);
{
@@ -1053,8 +1098,8 @@ void AccessorAssemblerImpl::EmitElementLoad(
}
}
-void AccessorAssemblerImpl::CheckPrototype(Node* prototype_cell, Node* name,
- Label* miss) {
+void AccessorAssembler::CheckPrototype(Node* prototype_cell, Node* name,
+ Label* miss) {
Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
Label done(this);
@@ -1083,9 +1128,8 @@ void AccessorAssemblerImpl::CheckPrototype(Node* prototype_cell, Node* name,
Bind(&done);
}
-void AccessorAssemblerImpl::NameDictionaryNegativeLookup(Node* object,
- Node* name,
- Label* miss) {
+void AccessorAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
+ Label* miss) {
CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
Node* properties = LoadProperties(object);
// Ensure the property does not exist in a dictionary-mode object.
@@ -1096,14 +1140,199 @@ void AccessorAssemblerImpl::NameDictionaryNegativeLookup(Node* object,
Bind(&done);
}
+void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
+ Node* instance_type, Node* index,
+ Label* slow) {
+ Comment("integer index");
+
+ ExitPoint direct_exit(this);
+
+ Label if_element_hole(this), if_oob(this);
+ // Receivers requiring non-standard element accesses (interceptors, access
+ // checks, strings and string wrappers, proxies) are handled in the runtime.
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ slow);
+ Node* elements = LoadElements(receiver);
+ Node* elements_kind = LoadMapElementsKind(receiver_map);
+ Node* is_jsarray_condition =
+ Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
+ Variable var_double_value(this, MachineRepresentation::kFloat64);
+ Label rebox_double(this, &var_double_value);
+
+ // Unimplemented elements kinds fall back to a runtime call.
+ Label* unimplemented_elements_kind = slow;
+ IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
+ EmitElementLoad(receiver, elements, elements_kind, index,
+ is_jsarray_condition, &if_element_hole, &rebox_double,
+ &var_double_value, unimplemented_elements_kind, &if_oob, slow,
+ &direct_exit);
+
+ Bind(&rebox_double);
+ Return(AllocateHeapNumberWithValue(var_double_value.value()));
+
+ Bind(&if_oob);
+ {
+ Comment("out of bounds");
+ // Negative keys can't take the fast OOB path.
+ GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), slow);
+ // Positive OOB indices are effectively the same as hole loads.
+ Goto(&if_element_hole);
+ }
+
+ Bind(&if_element_hole);
+ {
+ Comment("found the hole");
+ Label return_undefined(this);
+ BranchIfPrototypesHaveNoElements(receiver_map, &return_undefined, slow);
+
+ Bind(&return_undefined);
+ Return(UndefinedConstant());
+ }
+}
+
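A minimal standalone sketch of the out-of-bounds policy above (TryFastElementAccess is a hypothetical helper, not part of this patch):

#include <cstdint>

// Mirrors the policy in GenericElementLoad: negative indices must take the
// slow (runtime) path, while non-negative out-of-bounds indices behave like
// hole loads and may therefore resolve to undefined after the prototype walk.
bool TryFastElementAccess(intptr_t index, intptr_t length,
                          bool* treat_as_hole) {
  if (index < 0) return false;          // negative keys: runtime only
  *treat_as_hole = (index >= length);   // positive OOB == hole load
  return true;
}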
+void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
+ Node* instance_type, Node* key,
+ const LoadICParameters* p,
+ Label* slow) {
+ Comment("key is unique name");
+ Label if_found_on_receiver(this), if_property_dictionary(this),
+ lookup_prototype_chain(this);
+ Variable var_details(this, MachineRepresentation::kWord32);
+ Variable var_value(this, MachineRepresentation::kTagged);
+
+ // Receivers requiring non-standard accesses (interceptors, access
+ // checks, strings and string wrappers, proxies) are handled in the runtime.
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+ slow);
+
+ // Check if the receiver has fast or slow properties.
+ Node* properties = LoadProperties(receiver);
+ Node* properties_map = LoadMap(properties);
+ GotoIf(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
+ &if_property_dictionary);
+
+ // Try looking up the property on the receiver; if unsuccessful, look
+ // for a handler in the stub cache.
+ Node* bitfield3 = LoadMapBitField3(receiver_map);
+ Node* descriptors = LoadMapDescriptors(receiver_map);
+
+ Label if_descriptor_found(this), stub_cache(this);
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ DescriptorLookup(key, descriptors, bitfield3, &if_descriptor_found,
+ &var_name_index, &stub_cache);
+
+ Bind(&if_descriptor_found);
+ {
+ LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+ var_name_index.value(), &var_details,
+ &var_value);
+ Goto(&if_found_on_receiver);
+ }
+
+ Bind(&stub_cache);
+ {
+ Comment("stub cache probe for fast property load");
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ Label found_handler(this, &var_handler), stub_cache_miss(this);
+ TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
+ &found_handler, &var_handler, &stub_cache_miss);
+ Bind(&found_handler);
+ { HandleLoadICHandlerCase(p, var_handler.value(), slow); }
+
+ Bind(&stub_cache_miss);
+ {
+ // TODO(jkummerow): Check if the property exists on the prototype
+ // chain. If it doesn't, then there's no point in missing.
+ Comment("KeyedLoadGeneric_miss");
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
+ p->name, p->slot, p->vector);
+ }
+ }
+
+ Bind(&if_property_dictionary);
+ {
+ Comment("dictionary property load");
+    // We checked for LAST_SPECIAL_RECEIVER_TYPE above, which rules out
+    // seeing global objects here (which would need special handling).
+
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label dictionary_found(this, &var_name_index);
+ NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
+ &var_name_index,
+ &lookup_prototype_chain);
+ Bind(&dictionary_found);
+ {
+ LoadPropertyFromNameDictionary(properties, var_name_index.value(),
+ &var_details, &var_value);
+ Goto(&if_found_on_receiver);
+ }
+ }
+
+ Bind(&if_found_on_receiver);
+ {
+ Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
+ p->context, receiver, slow);
+ IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
+ Return(value);
+ }
+
+ Bind(&lookup_prototype_chain);
+ {
+ Variable var_holder_map(this, MachineRepresentation::kTagged);
+ Variable var_holder_instance_type(this, MachineRepresentation::kWord32);
+ Label return_undefined(this);
+ Variable* merged_variables[] = {&var_holder_map, &var_holder_instance_type};
+ Label loop(this, arraysize(merged_variables), merged_variables);
+
+ var_holder_map.Bind(receiver_map);
+ var_holder_instance_type.Bind(instance_type);
+ // Private symbols must not be looked up on the prototype chain.
+ GotoIf(IsPrivateSymbol(key), &return_undefined);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Bailout if it can be an integer indexed exotic case.
+ GotoIf(Word32Equal(var_holder_instance_type.value(),
+ Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ slow);
+ Node* proto = LoadMapPrototype(var_holder_map.value());
+ GotoIf(WordEqual(proto, NullConstant()), &return_undefined);
+ Node* proto_map = LoadMap(proto);
+ Node* proto_instance_type = LoadMapInstanceType(proto_map);
+ var_holder_map.Bind(proto_map);
+ var_holder_instance_type.Bind(proto_instance_type);
+ Label next_proto(this), return_value(this, &var_value), goto_slow(this);
+ TryGetOwnProperty(p->context, receiver, proto, proto_map,
+ proto_instance_type, key, &return_value, &var_value,
+ &next_proto, &goto_slow);
+
+ // This trampoline and the next are required to appease Turbofan's
+ // variable merging.
+ Bind(&next_proto);
+ Goto(&loop);
+
+ Bind(&goto_slow);
+ Goto(slow);
+
+ Bind(&return_value);
+ Return(var_value.value());
+ }
+
+ Bind(&return_undefined);
+ Return(UndefinedConstant());
+ }
+}
+
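The overall lookup order GenericPropertyLoad wires up, restated as a hypothetical C++ sketch (LoadOutcome and GenericPropertyLoadSketch are illustrative names, not the CSA API):

// Order of attempts for a unique-name load on an eligible receiver:
//   1. own descriptor array (fast properties)
//   2. stub cache probe, else the IC miss runtime call
//   3. property dictionary (slow properties)
//   4. prototype chain walk, bailing out for typed arrays and skipping
//      private symbols entirely
enum class LoadOutcome { kValue, kUndefined, kSlowPath };

LoadOutcome GenericPropertyLoadSketch(bool has_fast_properties,
                                      bool found_in_descriptors,
                                      bool found_in_dictionary,
                                      bool found_on_prototype) {
  if (has_fast_properties) {
    if (found_in_descriptors) return LoadOutcome::kValue;
    return LoadOutcome::kSlowPath;      // stub cache probe / IC miss
  }
  if (found_in_dictionary) return LoadOutcome::kValue;
  if (found_on_prototype) return LoadOutcome::kValue;
  return LoadOutcome::kUndefined;       // exhausted the prototype chain
}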
//////////////////// Stub cache access helpers.
-enum AccessorAssemblerImpl::StubCacheTable : int {
+enum AccessorAssembler::StubCacheTable : int {
kPrimary = static_cast<int>(StubCache::kPrimary),
kSecondary = static_cast<int>(StubCache::kSecondary)
};
-Node* AccessorAssemblerImpl::StubCachePrimaryOffset(Node* name, Node* map) {
+Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
// See v8::internal::StubCache::PrimaryOffset().
STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
@@ -1125,7 +1354,7 @@ Node* AccessorAssemblerImpl::StubCachePrimaryOffset(Node* name, Node* map) {
return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
}
-Node* AccessorAssemblerImpl::StubCacheSecondaryOffset(Node* name, Node* seed) {
+Node* AccessorAssembler::StubCacheSecondaryOffset(Node* name, Node* seed) {
// See v8::internal::StubCache::SecondaryOffset().
// Use the seed from the primary cache in the secondary cache.
@@ -1137,10 +1366,12 @@ Node* AccessorAssemblerImpl::StubCacheSecondaryOffset(Node* name, Node* seed) {
return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
}
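These offsets feed a two-way probe; a plain C++ sketch of that protocol over a hypothetical entry layout (hashing and pointer-scaling details elided; the real tables live in StubCache) might look like:

#include <cstddef>

struct Entry {  // hypothetical layout: one key/map/handler triple per slot
  void* key;
  void* map;
  void* handler;
};

void* Probe(Entry* primary, Entry* secondary, size_t mask, void* name,
            void* map, size_t primary_offset, size_t secondary_offset) {
  Entry* p = &primary[primary_offset & mask];
  if (p->key == name && p->map == map) return p->handler;    // primary hit
  Entry* s = &secondary[secondary_offset & mask];
  if (s->key == name && s->map == map) return s->handler;    // secondary hit
  return nullptr;  // miss: caller continues to the IC miss path
}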
-void AccessorAssemblerImpl::TryProbeStubCacheTable(
- StubCache* stub_cache, StubCacheTable table_id, Node* entry_offset,
- Node* name, Node* map, Label* if_handler, Variable* var_handler,
- Label* if_miss) {
+void AccessorAssembler::TryProbeStubCacheTable(StubCache* stub_cache,
+ StubCacheTable table_id,
+ Node* entry_offset, Node* name,
+ Node* map, Label* if_handler,
+ Variable* var_handler,
+ Label* if_miss) {
StubCache::Table table = static_cast<StubCache::Table>(table_id);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
@@ -1180,11 +1411,10 @@ void AccessorAssemblerImpl::TryProbeStubCacheTable(
Goto(if_handler);
}
-void AccessorAssemblerImpl::TryProbeStubCache(StubCache* stub_cache,
- Node* receiver, Node* name,
- Label* if_handler,
- Variable* var_handler,
- Label* if_miss) {
+void AccessorAssembler::TryProbeStubCache(StubCache* stub_cache, Node* receiver,
+ Node* name, Label* if_handler,
+ Variable* var_handler,
+ Label* if_miss) {
Label try_secondary(this), miss(this);
Counters* counters = isolate()->counters();
@@ -1217,7 +1447,7 @@ void AccessorAssemblerImpl::TryProbeStubCache(StubCache* stub_cache,
//////////////////// Entry points into private implementation (one per stub).
-void AccessorAssemblerImpl::LoadIC(const LoadICParameters* p) {
+void AccessorAssembler::LoadIC(const LoadICParameters* p) {
Variable var_handler(this, MachineRepresentation::kTagged);
// TODO(ishell): defer blocks when it works.
Label if_handler(this, &var_handler), try_polymorphic(this),
@@ -1238,8 +1468,8 @@ void AccessorAssemblerImpl::LoadIC(const LoadICParameters* p) {
{
// Check polymorphic case.
Comment("LoadIC_try_polymorphic");
- GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
- &try_megamorphic);
+ GotoIfNot(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+ &try_megamorphic);
HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
&miss, 2);
}
@@ -1247,9 +1477,8 @@ void AccessorAssemblerImpl::LoadIC(const LoadICParameters* p) {
Bind(&try_megamorphic);
{
// Check megamorphic case.
- GotoUnless(
- WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &miss);
+ GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &miss);
TryProbeStubCache(isolate()->load_stub_cache(), p->receiver, p->name,
&if_handler, &var_handler, &miss);
@@ -1261,13 +1490,15 @@ void AccessorAssemblerImpl::LoadIC(const LoadICParameters* p) {
}
}
-void AccessorAssemblerImpl::LoadICProtoArray(
+void AccessorAssembler::LoadICProtoArray(
const LoadICParameters* p, Node* handler,
bool throw_reference_error_if_nonexistent) {
Label miss(this);
CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
+ ExitPoint direct_exit(this);
+
Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
Node* handler_flags = SmiUntag(smi_handler);
@@ -1277,7 +1508,8 @@ void AccessorAssemblerImpl::LoadICProtoArray(
EmitLoadICProtoArrayCheck(p, handler, handler_length, handler_flags,
&miss, throw_reference_error_if_nonexistent);
- HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, kOnlyProperties);
+ HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, &direct_exit,
+ kOnlyProperties);
Bind(&miss);
{
@@ -1286,36 +1518,41 @@ void AccessorAssemblerImpl::LoadICProtoArray(
}
}
-void AccessorAssemblerImpl::LoadGlobalIC(const LoadICParameters* p,
- TypeofMode typeof_mode) {
- Label try_handler(this), call_handler(this), miss(this);
- Node* weak_cell =
- LoadFixedArrayElement(p->vector, p->slot, 0, SMI_PARAMETERS);
+void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
+ Node* vector, Node* slot, ExitPoint* exit_point, Label* try_handler,
+ Label* miss, ParameterMode slot_mode) {
+ Comment("LoadGlobalIC_TryPropertyCellCase");
+
+ Node* weak_cell = LoadFixedArrayElement(vector, slot, 0, slot_mode);
CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
// Load value or try handler case if the {weak_cell} is cleared.
- Node* property_cell = LoadWeakCellValue(weak_cell, &try_handler);
+ Node* property_cell = LoadWeakCellValue(weak_cell, try_handler);
CSA_ASSERT(this, HasInstanceType(property_cell, PROPERTY_CELL_TYPE));
Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
- GotoIf(WordEqual(value, TheHoleConstant()), &miss);
- Return(value);
+ GotoIf(WordEqual(value, TheHoleConstant()), miss);
+ exit_point->Return(value);
+}
- Node* handler;
- Bind(&try_handler);
- {
- handler =
- LoadFixedArrayElement(p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
- CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
- GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
- &miss);
- GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+void AccessorAssembler::LoadGlobalIC_TryHandlerCase(const LoadICParameters* p,
+ TypeofMode typeof_mode,
+ ExitPoint* exit_point,
+ Label* miss) {
+ Comment("LoadGlobalIC_TryHandlerCase");
- bool throw_reference_error_if_nonexistent =
- typeof_mode == NOT_INSIDE_TYPEOF;
- HandleLoadGlobalICHandlerCase(p, handler, &miss,
- throw_reference_error_if_nonexistent);
- }
+ Label call_handler(this);
+
+ Node* handler =
+ LoadFixedArrayElement(p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
+ CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
+ GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ miss);
+ GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+
+ bool throw_reference_error_if_nonexistent = typeof_mode == NOT_INSIDE_TYPEOF;
+ HandleLoadGlobalICHandlerCase(p, handler, miss, exit_point,
+ throw_reference_error_if_nonexistent);
Bind(&call_handler);
{
@@ -1323,17 +1560,35 @@ void AccessorAssemblerImpl::LoadGlobalIC(const LoadICParameters* p,
Node* native_context = LoadNativeContext(p->context);
Node* receiver =
LoadContextElement(native_context, Context::EXTENSION_INDEX);
- TailCallStub(descriptor, handler, p->context, receiver, p->name, p->slot,
- p->vector);
+ exit_point->ReturnCallStub(descriptor, handler, p->context, receiver,
+ p->name, p->slot, p->vector);
}
+}
+
+void AccessorAssembler::LoadGlobalIC_MissCase(const LoadICParameters* p,
+ ExitPoint* exit_point) {
+ Comment("LoadGlobalIC_MissCase");
+
+ exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context,
+ p->name, p->slot, p->vector);
+}
+
+void AccessorAssembler::LoadGlobalIC(const LoadICParameters* p,
+ TypeofMode typeof_mode) {
+ ExitPoint direct_exit(this);
+
+ Label try_handler(this), miss(this);
+ LoadGlobalIC_TryPropertyCellCase(p->vector, p->slot, &direct_exit,
+ &try_handler, &miss);
+
+ Bind(&try_handler);
+ LoadGlobalIC_TryHandlerCase(p, typeof_mode, &direct_exit, &miss);
+
Bind(&miss);
- {
- TailCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context, p->name, p->slot,
- p->vector);
- }
+ LoadGlobalIC_MissCase(p, &direct_exit);
}
-void AccessorAssemblerImpl::KeyedLoadIC(const LoadICParameters* p) {
+void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
Variable var_handler(this, MachineRepresentation::kTagged);
// TODO(ishell): defer blocks when it works.
Label if_handler(this, &var_handler), try_polymorphic(this),
@@ -1355,8 +1610,8 @@ void AccessorAssemblerImpl::KeyedLoadIC(const LoadICParameters* p) {
{
// Check polymorphic case.
Comment("KeyedLoadIC_try_polymorphic");
- GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
- &try_megamorphic);
+ GotoIfNot(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+ &try_megamorphic);
HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
&miss, 2);
}
@@ -1365,9 +1620,8 @@ void AccessorAssemblerImpl::KeyedLoadIC(const LoadICParameters* p) {
{
// Check megamorphic case.
Comment("KeyedLoadIC_try_megamorphic");
- GotoUnless(
- WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &try_polymorphic_name);
+ GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &try_polymorphic_name);
// TODO(jkummerow): Inline this? Or some of it?
TailCallStub(CodeFactory::KeyedLoadIC_Megamorphic(isolate()), p->context,
p->receiver, p->name, p->slot, p->vector);
@@ -1376,7 +1630,7 @@ void AccessorAssemblerImpl::KeyedLoadIC(const LoadICParameters* p) {
{
// We might have a name in feedback, and a fixed array in the next slot.
Comment("KeyedLoadIC_try_polymorphic_name");
- GotoUnless(WordEqual(feedback, p->name), &miss);
+ GotoIfNot(WordEqual(feedback, p->name), &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
Node* offset = ElementOffsetFromIndex(
@@ -1394,150 +1648,30 @@ void AccessorAssemblerImpl::KeyedLoadIC(const LoadICParameters* p) {
}
}
-void AccessorAssemblerImpl::KeyedLoadICGeneric(const LoadICParameters* p) {
+void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
Variable var_index(this, MachineType::PointerRepresentation());
- Variable var_details(this, MachineRepresentation::kWord32);
- Variable var_value(this, MachineRepresentation::kTagged);
- Label if_index(this), if_unique_name(this), if_element_hole(this),
- if_oob(this), slow(this), stub_cache_miss(this),
- if_property_dictionary(this), if_found_on_receiver(this);
+ Variable var_unique(this, MachineRepresentation::kTagged);
+ var_unique.Bind(p->name); // Dummy initialization.
+ Label if_index(this), if_unique_name(this), slow(this);
Node* receiver = p->receiver;
GotoIf(TaggedIsSmi(receiver), &slow);
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
- // Receivers requiring non-standard element accesses (interceptors, access
- // checks, strings and string wrappers, proxies) are handled in the runtime.
- GotoIf(Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
- &slow);
- Node* key = p->name;
- TryToName(key, &if_index, &var_index, &if_unique_name, &slow);
+ TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+ &slow);
Bind(&if_index);
{
- Comment("integer index");
- Node* index = var_index.value();
- Node* elements = LoadElements(receiver);
- Node* elements_kind = LoadMapElementsKind(receiver_map);
- Node* is_jsarray_condition =
- Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
- Variable var_double_value(this, MachineRepresentation::kFloat64);
- Label rebox_double(this, &var_double_value);
-
- // Unimplemented elements kinds fall back to a runtime call.
- Label* unimplemented_elements_kind = &slow;
- IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
- EmitElementLoad(receiver, elements, elements_kind, index,
- is_jsarray_condition, &if_element_hole, &rebox_double,
- &var_double_value, unimplemented_elements_kind, &if_oob,
- &slow);
-
- Bind(&rebox_double);
- Return(AllocateHeapNumberWithValue(var_double_value.value()));
- }
-
- Bind(&if_oob);
- {
- Comment("out of bounds");
- Node* index = var_index.value();
- // Negative keys can't take the fast OOB path.
- GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), &slow);
- // Positive OOB indices are effectively the same as hole loads.
- Goto(&if_element_hole);
- }
-
- Bind(&if_element_hole);
- {
- Comment("found the hole");
- Label return_undefined(this);
- BranchIfPrototypesHaveNoElements(receiver_map, &return_undefined, &slow);
-
- Bind(&return_undefined);
- Return(UndefinedConstant());
+ GenericElementLoad(receiver, receiver_map, instance_type, var_index.value(),
+ &slow);
}
- Node* properties = nullptr;
Bind(&if_unique_name);
{
- Comment("key is unique name");
- // Check if the receiver has fast or slow properties.
- properties = LoadProperties(receiver);
- Node* properties_map = LoadMap(properties);
- GotoIf(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
- &if_property_dictionary);
-
- // Try looking up the property on the receiver; if unsuccessful, look
- // for a handler in the stub cache.
- Comment("DescriptorArray lookup");
-
- // Skip linear search if there are too many descriptors.
- // TODO(jkummerow): Consider implementing binary search.
- // See also TryLookupProperty() which has the same limitation.
- const int32_t kMaxLinear = 210;
- Label stub_cache(this);
- Node* bitfield3 = LoadMapBitField3(receiver_map);
- Node* nof =
- DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
- GotoIf(UintPtrLessThan(IntPtrConstant(kMaxLinear), nof), &stub_cache);
- Node* descriptors = LoadMapDescriptors(receiver_map);
- Variable var_name_index(this, MachineType::PointerRepresentation());
- Label if_descriptor_found(this);
- DescriptorLookupLinear(key, descriptors, nof, &if_descriptor_found,
- &var_name_index, &stub_cache);
-
- Bind(&if_descriptor_found);
- {
- LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
- var_name_index.value(), &var_details,
- &var_value);
- Goto(&if_found_on_receiver);
- }
-
- Bind(&stub_cache);
- {
- Comment("stub cache probe for fast property load");
- Variable var_handler(this, MachineRepresentation::kTagged);
- Label found_handler(this, &var_handler), stub_cache_miss(this);
- TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
- &found_handler, &var_handler, &stub_cache_miss);
- Bind(&found_handler);
- { HandleLoadICHandlerCase(p, var_handler.value(), &slow); }
-
- Bind(&stub_cache_miss);
- {
- Comment("KeyedLoadGeneric_miss");
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
- p->name, p->slot, p->vector);
- }
- }
- }
-
- Bind(&if_property_dictionary);
- {
- Comment("dictionary property load");
- // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
- // seeing global objects here (which would need special handling).
-
- Variable var_name_index(this, MachineType::PointerRepresentation());
- Label dictionary_found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
- &var_name_index, &slow);
- Bind(&dictionary_found);
- {
- LoadPropertyFromNameDictionary(properties, var_name_index.value(),
- &var_details, &var_value);
- Goto(&if_found_on_receiver);
- }
- }
-
- Bind(&if_found_on_receiver);
- {
- Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, receiver, &slow);
- IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
- Return(value);
+ GenericPropertyLoad(receiver, receiver_map, instance_type,
+ var_unique.value(), p, &slow);
}
Bind(&slow);
@@ -1550,7 +1684,7 @@ void AccessorAssemblerImpl::KeyedLoadICGeneric(const LoadICParameters* p) {
}
}
-void AccessorAssemblerImpl::StoreIC(const StoreICParameters* p) {
+void AccessorAssembler::StoreIC(const StoreICParameters* p) {
Variable var_handler(this, MachineRepresentation::kTagged);
// TODO(ishell): defer blocks when it works.
Label if_handler(this, &var_handler), try_polymorphic(this),
@@ -1574,7 +1708,7 @@ void AccessorAssemblerImpl::StoreIC(const StoreICParameters* p) {
{
// Check polymorphic case.
Comment("StoreIC_try_polymorphic");
- GotoUnless(
+ GotoIfNot(
WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
&try_megamorphic);
HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
@@ -1584,9 +1718,8 @@ void AccessorAssemblerImpl::StoreIC(const StoreICParameters* p) {
Bind(&try_megamorphic);
{
// Check megamorphic case.
- GotoUnless(
- WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &miss);
+ GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &miss);
TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
&if_handler, &var_handler, &miss);
@@ -1598,8 +1731,8 @@ void AccessorAssemblerImpl::StoreIC(const StoreICParameters* p) {
}
}
-void AccessorAssemblerImpl::KeyedStoreIC(const StoreICParameters* p,
- LanguageMode language_mode) {
+void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p,
+ LanguageMode language_mode) {
// TODO(ishell): defer blocks when it works.
Label miss(this /*, Label::kDeferred*/);
{
@@ -1627,7 +1760,7 @@ void AccessorAssemblerImpl::KeyedStoreIC(const StoreICParameters* p,
{
// CheckPolymorphic case.
Comment("KeyedStoreIC_try_polymorphic");
- GotoUnless(
+ GotoIfNot(
WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
&try_megamorphic);
Label if_transition_handler(this);
@@ -1643,7 +1776,7 @@ void AccessorAssemblerImpl::KeyedStoreIC(const StoreICParameters* p,
Label call_handler(this);
Variable var_code_handler(this, MachineRepresentation::kTagged);
var_code_handler.Bind(handler);
- GotoUnless(IsTuple2Map(LoadMap(handler)), &call_handler);
+ GotoIfNot(IsTuple2Map(LoadMap(handler)), &call_handler);
{
CSA_ASSERT(this, IsTuple2Map(LoadMap(handler)));
@@ -1677,7 +1810,7 @@ void AccessorAssemblerImpl::KeyedStoreIC(const StoreICParameters* p,
{
// Check megamorphic case.
Comment("KeyedStoreIC_try_megamorphic");
- GotoUnless(
+ GotoIfNot(
WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
&try_polymorphic_name);
TailCallStub(
@@ -1689,7 +1822,7 @@ void AccessorAssemblerImpl::KeyedStoreIC(const StoreICParameters* p,
{
// We might have a name in feedback, and a fixed array in the next slot.
Comment("KeyedStoreIC_try_polymorphic_name");
- GotoUnless(WordEqual(feedback, p->name), &miss);
+ GotoIfNot(WordEqual(feedback, p->name), &miss);
// If the name comparison succeeded, we know we have a FixedArray with
// at least one map/handler pair.
Node* offset = ElementOffsetFromIndex(
@@ -1710,7 +1843,7 @@ void AccessorAssemblerImpl::KeyedStoreIC(const StoreICParameters* p,
//////////////////// Public methods.
-void AccessorAssemblerImpl::GenerateLoadIC() {
+void AccessorAssembler::GenerateLoadIC() {
typedef LoadWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1723,7 +1856,7 @@ void AccessorAssemblerImpl::GenerateLoadIC() {
LoadIC(&p);
}
-void AccessorAssemblerImpl::GenerateLoadICTrampoline() {
+void AccessorAssembler::GenerateLoadICTrampoline() {
typedef LoadDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1736,9 +1869,9 @@ void AccessorAssemblerImpl::GenerateLoadICTrampoline() {
LoadIC(&p);
}
-void AccessorAssemblerImpl::GenerateLoadICProtoArray(
+void AccessorAssembler::GenerateLoadICProtoArray(
bool throw_reference_error_if_nonexistent) {
- typedef LoadICProtoArrayStub::Descriptor Descriptor;
+ typedef LoadICProtoArrayDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
@@ -1751,8 +1884,8 @@ void AccessorAssemblerImpl::GenerateLoadICProtoArray(
LoadICProtoArray(&p, handler, throw_reference_error_if_nonexistent);
}
-void AccessorAssemblerImpl::GenerateLoadField() {
- typedef LoadFieldStub::Descriptor Descriptor;
+void AccessorAssembler::GenerateLoadField() {
+ typedef LoadFieldDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = nullptr;
@@ -1761,11 +1894,13 @@ void AccessorAssemblerImpl::GenerateLoadField() {
Node* context = Parameter(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
+ ExitPoint direct_exit(this);
+
HandleLoadICSmiHandlerCase(&p, receiver, Parameter(Descriptor::kSmiHandler),
- nullptr, kOnlyProperties);
+ nullptr, &direct_exit, kOnlyProperties);
}
-void AccessorAssemblerImpl::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
+void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
typedef LoadGlobalWithVectorDescriptor Descriptor;
Node* name = Parameter(Descriptor::kName);
@@ -1777,8 +1912,7 @@ void AccessorAssemblerImpl::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
LoadGlobalIC(&p, typeof_mode);
}
-void AccessorAssemblerImpl::GenerateLoadGlobalICTrampoline(
- TypeofMode typeof_mode) {
+void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
typedef LoadGlobalDescriptor Descriptor;
Node* name = Parameter(Descriptor::kName);
@@ -1790,7 +1924,7 @@ void AccessorAssemblerImpl::GenerateLoadGlobalICTrampoline(
LoadGlobalIC(&p, typeof_mode);
}
-void AccessorAssemblerImpl::GenerateKeyedLoadICTF() {
+void AccessorAssembler::GenerateKeyedLoadIC() {
typedef LoadWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1803,7 +1937,7 @@ void AccessorAssemblerImpl::GenerateKeyedLoadICTF() {
KeyedLoadIC(&p);
}
-void AccessorAssemblerImpl::GenerateKeyedLoadICTrampolineTF() {
+void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
typedef LoadDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1816,7 +1950,7 @@ void AccessorAssemblerImpl::GenerateKeyedLoadICTrampolineTF() {
KeyedLoadIC(&p);
}
-void AccessorAssemblerImpl::GenerateKeyedLoadICMegamorphic() {
+void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
typedef LoadWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1829,7 +1963,7 @@ void AccessorAssemblerImpl::GenerateKeyedLoadICMegamorphic() {
KeyedLoadICGeneric(&p);
}
-void AccessorAssemblerImpl::GenerateStoreIC() {
+void AccessorAssembler::GenerateStoreIC() {
typedef StoreWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1843,7 +1977,7 @@ void AccessorAssemblerImpl::GenerateStoreIC() {
StoreIC(&p);
}
-void AccessorAssemblerImpl::GenerateStoreICTrampoline() {
+void AccessorAssembler::GenerateStoreICTrampoline() {
typedef StoreDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1857,7 +1991,7 @@ void AccessorAssemblerImpl::GenerateStoreICTrampoline() {
StoreIC(&p);
}
-void AccessorAssemblerImpl::GenerateKeyedStoreICTF(LanguageMode language_mode) {
+void AccessorAssembler::GenerateKeyedStoreIC(LanguageMode language_mode) {
typedef StoreWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1871,7 +2005,7 @@ void AccessorAssemblerImpl::GenerateKeyedStoreICTF(LanguageMode language_mode) {
KeyedStoreIC(&p, language_mode);
}
-void AccessorAssemblerImpl::GenerateKeyedStoreICTrampolineTF(
+void AccessorAssembler::GenerateKeyedStoreICTrampoline(
LanguageMode language_mode) {
typedef StoreDescriptor Descriptor;
@@ -1886,48 +2020,5 @@ void AccessorAssemblerImpl::GenerateKeyedStoreICTrampolineTF(
KeyedStoreIC(&p, language_mode);
}
-//////////////////// AccessorAssembler implementation.
-
-#define DISPATCH_TO_IMPL(Name) \
- void AccessorAssembler::Generate##Name(CodeAssemblerState* state) { \
- AccessorAssemblerImpl assembler(state); \
- assembler.Generate##Name(); \
- }
-
-ACCESSOR_ASSEMBLER_PUBLIC_INTERFACE(DISPATCH_TO_IMPL)
-#undef DISPATCH_TO_IMPL
-
-void AccessorAssembler::GenerateLoadICProtoArray(
- CodeAssemblerState* state, bool throw_reference_error_if_nonexistent) {
- AccessorAssemblerImpl assembler(state);
- assembler.GenerateLoadICProtoArray(throw_reference_error_if_nonexistent);
-}
-
-void AccessorAssembler::GenerateLoadGlobalIC(CodeAssemblerState* state,
- TypeofMode typeof_mode) {
- AccessorAssemblerImpl assembler(state);
- assembler.GenerateLoadGlobalIC(typeof_mode);
-}
-
-void AccessorAssembler::GenerateLoadGlobalICTrampoline(
- CodeAssemblerState* state, TypeofMode typeof_mode) {
- AccessorAssemblerImpl assembler(state);
- assembler.GenerateLoadGlobalICTrampoline(typeof_mode);
-}
-
-void AccessorAssembler::GenerateKeyedStoreICTF(CodeAssemblerState* state,
- LanguageMode language_mode) {
- AccessorAssemblerImpl assembler(state);
- assembler.GenerateKeyedStoreICTF(language_mode);
-}
-
-void AccessorAssembler::GenerateKeyedStoreICTrampolineTF(
- CodeAssemblerState* state, LanguageMode language_mode) {
- AccessorAssemblerImpl assembler(state);
- assembler.GenerateKeyedStoreICTrampolineTF(language_mode);
-}
-
-#undef ACCESSOR_ASSEMBLER_PUBLIC_INTERFACE
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 3b75c2e54d..9bc2873f85 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -5,7 +5,7 @@
#ifndef V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
#define V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
-#include "src/globals.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -14,29 +14,268 @@ namespace compiler {
class CodeAssemblerState;
}
-class AccessorAssembler {
+class ExitPoint;
+
+class AccessorAssembler : public CodeStubAssembler {
+ public:
+ typedef compiler::Node Node;
+
+ explicit AccessorAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ void GenerateLoadIC();
+ void GenerateLoadField();
+ void GenerateLoadICTrampoline();
+ void GenerateKeyedLoadIC();
+ void GenerateKeyedLoadICTrampoline();
+ void GenerateKeyedLoadIC_Megamorphic();
+ void GenerateStoreIC();
+ void GenerateStoreICTrampoline();
+
+ void GenerateLoadICProtoArray(bool throw_reference_error_if_nonexistent);
+
+ void GenerateLoadGlobalIC(TypeofMode typeof_mode);
+ void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
+
+ void GenerateKeyedStoreIC(LanguageMode language_mode);
+ void GenerateKeyedStoreICTrampoline(LanguageMode language_mode);
+
+ void TryProbeStubCache(StubCache* stub_cache, Node* receiver, Node* name,
+ Label* if_handler, Variable* var_handler,
+ Label* if_miss);
+
+ Node* StubCachePrimaryOffsetForTesting(Node* name, Node* map) {
+ return StubCachePrimaryOffset(name, map);
+ }
+ Node* StubCacheSecondaryOffsetForTesting(Node* name, Node* map) {
+ return StubCacheSecondaryOffset(name, map);
+ }
+
+ struct LoadICParameters {
+ LoadICParameters(Node* context, Node* receiver, Node* name, Node* slot,
+ Node* vector)
+ : context(context),
+ receiver(receiver),
+ name(name),
+ slot(slot),
+ vector(vector) {}
+
+ Node* context;
+ Node* receiver;
+ Node* name;
+ Node* slot;
+ Node* vector;
+ };
+
+ void LoadGlobalIC_TryPropertyCellCase(
+ Node* vector, Node* slot, ExitPoint* exit_point, Label* try_handler,
+ Label* miss, ParameterMode slot_mode = SMI_PARAMETERS);
+ void LoadGlobalIC_TryHandlerCase(const LoadICParameters* p,
+ TypeofMode typeof_mode,
+ ExitPoint* exit_point, Label* miss);
+ void LoadGlobalIC_MissCase(const LoadICParameters* p, ExitPoint* exit_point);
+
+ protected:
+ struct StoreICParameters : public LoadICParameters {
+ StoreICParameters(Node* context, Node* receiver, Node* name, Node* value,
+ Node* slot, Node* vector)
+ : LoadICParameters(context, receiver, name, slot, vector),
+ value(value) {}
+ Node* value;
+ };
+
+ enum ElementSupport { kOnlyProperties, kSupportElements };
+ void HandleStoreICHandlerCase(
+ const StoreICParameters* p, Node* handler, Label* miss,
+ ElementSupport support_elements = kOnlyProperties);
+
+ private:
+ // Stub generation entry points.
+
+ void LoadIC(const LoadICParameters* p);
+ void LoadICProtoArray(const LoadICParameters* p, Node* handler,
+ bool throw_reference_error_if_nonexistent);
+ void LoadGlobalIC(const LoadICParameters* p, TypeofMode typeof_mode);
+ void KeyedLoadIC(const LoadICParameters* p);
+ void KeyedLoadICGeneric(const LoadICParameters* p);
+ void StoreIC(const StoreICParameters* p);
+ void KeyedStoreIC(const StoreICParameters* p, LanguageMode language_mode);
+
+ // IC dispatcher behavior.
+
+ // Checks monomorphic case. Returns {feedback} entry of the vector.
+ Node* TryMonomorphicCase(Node* slot, Node* vector, Node* receiver_map,
+ Label* if_handler, Variable* var_handler,
+ Label* if_miss);
+ void HandlePolymorphicCase(Node* receiver_map, Node* feedback,
+ Label* if_handler, Variable* var_handler,
+ Label* if_miss, int unroll_count);
+ void HandleKeyedStorePolymorphicCase(Node* receiver_map, Node* feedback,
+ Label* if_handler, Variable* var_handler,
+ Label* if_transition_handler,
+ Variable* var_transition_map_cell,
+ Label* if_miss);
+
+ // LoadIC implementation.
+
+ void HandleLoadICHandlerCase(
+ const LoadICParameters* p, Node* handler, Label* miss,
+ ElementSupport support_elements = kOnlyProperties);
+
+ void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
+ Node* smi_handler, Label* miss,
+ ExitPoint* exit_point,
+ ElementSupport support_elements);
+
+ void HandleLoadICProtoHandlerCase(const LoadICParameters* p, Node* handler,
+ Variable* var_holder,
+ Variable* var_smi_handler,
+ Label* if_smi_handler, Label* miss,
+ ExitPoint* exit_point,
+ bool throw_reference_error_if_nonexistent);
+
+ Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p, Node* handler,
+ Node* handler_length, Node* handler_flags,
+ Label* miss,
+ bool throw_reference_error_if_nonexistent);
+
+ // LoadGlobalIC implementation.
+
+ void HandleLoadGlobalICHandlerCase(const LoadICParameters* p, Node* handler,
+ Label* miss, ExitPoint* exit_point,
+ bool throw_reference_error_if_nonexistent);
+
+ // StoreIC implementation.
+
+ void HandleStoreICElementHandlerCase(const StoreICParameters* p,
+ Node* handler, Label* miss);
+
+ void HandleStoreICProtoHandler(const StoreICParameters* p, Node* handler,
+ Label* miss);
+  // If |transition| is nullptr, a normal field store is generated;
+  // otherwise, a transitioning store.
+ void HandleStoreICSmiHandlerCase(Node* handler_word, Node* holder,
+ Node* value, Node* transition, Label* miss);
+  // If |transition| is nullptr, a normal field store is generated;
+  // otherwise, a transitioning store.
+ void HandleStoreFieldAndReturn(Node* handler_word, Node* holder,
+ Representation representation, Node* value,
+ Node* transition, Label* miss);
+
+ // KeyedLoadIC_Generic implementation.
+
+ void GenericElementLoad(Node* receiver, Node* receiver_map,
+ Node* instance_type, Node* index, Label* slow);
+
+ void GenericPropertyLoad(Node* receiver, Node* receiver_map,
+ Node* instance_type, Node* key,
+ const LoadICParameters* p, Label* slow);
+
+ // Low-level helpers.
+
+ Node* PrepareValueForStore(Node* handler_word, Node* holder,
+ Representation representation, Node* transition,
+ Node* value, Label* bailout);
+
+ // Extends properties backing store by JSObject::kFieldsAdded elements.
+ void ExtendPropertiesBackingStore(Node* object);
+
+ void StoreNamedField(Node* handler_word, Node* object, bool is_inobject,
+ Representation representation, Node* value,
+ bool transition_to_field, Label* bailout);
+
+ void EmitFastElementsBoundsCheck(Node* object, Node* elements,
+ Node* intptr_index,
+ Node* is_jsarray_condition, Label* miss);
+ void EmitElementLoad(Node* object, Node* elements, Node* elements_kind,
+ Node* key, Node* is_jsarray_condition, Label* if_hole,
+ Label* rebox_double, Variable* var_double_value,
+ Label* unimplemented_elements_kind, Label* out_of_bounds,
+ Label* miss, ExitPoint* exit_point);
+ void CheckPrototype(Node* prototype_cell, Node* name, Label* miss);
+ void NameDictionaryNegativeLookup(Node* object, Node* name, Label* miss);
+
+ // Stub cache access helpers.
+
+ // This enum is used here as a replacement for StubCache::Table to avoid
+ // including stub cache header.
+ enum StubCacheTable : int;
+
+ Node* StubCachePrimaryOffset(Node* name, Node* map);
+ Node* StubCacheSecondaryOffset(Node* name, Node* seed);
+
+ void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
+ Node* entry_offset, Node* name, Node* map,
+ Label* if_handler, Variable* var_handler,
+ Label* if_miss);
+};
+
+// Abstraction over direct and indirect exit points. Direct exits correspond to
+// tailcalls and Return, while indirect exits store the result in a variable
+// and then jump to an exit label.
+class ExitPoint {
+ private:
+ typedef compiler::Node Node;
+ typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+ typedef compiler::CodeAssemblerVariable CodeAssemblerVariable;
+
public:
- static void GenerateLoadIC(compiler::CodeAssemblerState* state);
- static void GenerateLoadICTrampoline(compiler::CodeAssemblerState* state);
- static void GenerateLoadICProtoArray(
- compiler::CodeAssemblerState* state,
- bool throw_reference_error_if_nonexistent);
- static void GenerateLoadGlobalIC(compiler::CodeAssemblerState* state,
- TypeofMode typeof_mode);
- static void GenerateLoadGlobalICTrampoline(
- compiler::CodeAssemblerState* state, TypeofMode typeof_mode);
- static void GenerateKeyedLoadICTF(compiler::CodeAssemblerState* state);
- static void GenerateKeyedLoadICTrampolineTF(
- compiler::CodeAssemblerState* state);
- static void GenerateKeyedLoadICMegamorphic(
- compiler::CodeAssemblerState* state);
- static void GenerateLoadField(compiler::CodeAssemblerState* state);
- static void GenerateStoreIC(compiler::CodeAssemblerState* state);
- static void GenerateStoreICTrampoline(compiler::CodeAssemblerState* state);
- static void GenerateKeyedStoreICTF(compiler::CodeAssemblerState* state,
- LanguageMode language_mode);
- static void GenerateKeyedStoreICTrampolineTF(
- compiler::CodeAssemblerState* state, LanguageMode language_mode);
+ explicit ExitPoint(CodeStubAssembler* assembler)
+ : ExitPoint(assembler, nullptr, nullptr) {}
+ ExitPoint(CodeStubAssembler* assembler, CodeAssemblerLabel* out,
+ CodeAssemblerVariable* var_result)
+ : out_(out), var_result_(var_result), asm_(assembler) {
+ DCHECK_EQ(out != nullptr, var_result != nullptr);
+ }
+
+ template <class... TArgs>
+ void ReturnCallRuntime(Runtime::FunctionId function, Node* context,
+ TArgs... args) {
+ if (IsDirect()) {
+ asm_->TailCallRuntime(function, context, args...);
+ } else {
+ IndirectReturn(asm_->CallRuntime(function, context, args...));
+ }
+ }
+
+ template <class... TArgs>
+ void ReturnCallStub(Callable const& callable, Node* context, TArgs... args) {
+ if (IsDirect()) {
+ asm_->TailCallStub(callable, context, args...);
+ } else {
+ IndirectReturn(asm_->CallStub(callable, context, args...));
+ }
+ }
+
+ template <class... TArgs>
+ void ReturnCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, TArgs... args) {
+ if (IsDirect()) {
+ asm_->TailCallStub(descriptor, target, context, args...);
+ } else {
+ IndirectReturn(asm_->CallStub(descriptor, target, context, args...));
+ }
+ }
+
+ void Return(Node* const result) {
+ if (IsDirect()) {
+ asm_->Return(result);
+ } else {
+ IndirectReturn(result);
+ }
+ }
+
+ bool IsDirect() const { return out_ == nullptr; }
+
+ private:
+ void IndirectReturn(Node* const result) {
+ var_result_->Bind(result);
+ asm_->Goto(out_);
+ }
+
+ CodeAssemblerLabel* const out_;
+ CodeAssemblerVariable* const var_result_;
+ CodeStubAssembler* const asm_;
};
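A usage sketch contrasting the two modes, abbreviated from the LoadGlobalIC callers in the .cc diff above (CSA-style pseudocode):

// Direct exit: Return() and the ReturnCall* helpers take effect immediately.
//   ExitPoint direct_exit(this);
//   direct_exit.Return(value);            // emits Return(value)
//
// Indirect exit: the result is bound to a variable and control jumps to a
// merge label, so a caller can inline this code and continue afterwards.
//   Variable var_result(this, MachineRepresentation::kTagged);
//   Label done(this, &var_result);
//   ExitPoint indirect_exit(this, &done, &var_result);
//   indirect_exit.Return(value);          // var_result.Bind(value); Goto(&done)
//   ...
//   Bind(&done);
//   // use var_result.value()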
} // namespace internal
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 3f2d0e42de..ebef63ca66 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -181,15 +181,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(r0, scratch1);
- __ Ret();
-}
-
-
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
@@ -208,10 +199,12 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
__ b(ne, miss);
}
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -219,15 +212,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
__ push(name);
__ push(receiver);
__ push(holder);
-}
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, Runtime::FunctionId id) {
- DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
- Runtime::FunctionForId(id)->nargs);
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallRuntime(id);
}
@@ -530,8 +515,18 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name(), receiver(), holder_reg);
+ // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+ if (holder_reg.is(receiver())) {
+ __ Push(slot(), vector());
+ } else {
+ __ Push(scratch3(), scratch2()); // slot, vector
+ }
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index fad0737a1c..b749027ebe 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -6,45 +6,12 @@
#include "src/codegen.h"
#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreWithVectorDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister(),
- StoreWithVectorDescriptor::ReceiverRegister(),
- StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
-
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
@@ -100,9 +67,7 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
- static_cast<void*>(address),
- static_cast<void*>(cmp_instruction_address), delta);
+ LOG(isolate, PatchIC(address, cmp_instruction_address, delta));
}
Address patch_address =
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 8c89908f4e..b7dc58974f 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -83,19 +83,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- // TryGetFunctionPrototype can't put the result directly in x0 because the
- // 3 inputs registers can't alias and we call this function from
- // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly
- // move the result in x0.
- __ Mov(x0, scratch1);
- __ Ret();
-}
-
-
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
@@ -112,25 +99,18 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
}
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-
__ Push(name, receiver, holder);
-}
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, Runtime::FunctionId id) {
- DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
- Runtime::FunctionForId(id)->nargs);
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallRuntime(id);
}
@@ -566,8 +546,18 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name(), receiver(), holder_reg);
+ // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+ if (holder_reg.is(receiver())) {
+ __ Push(slot(), vector());
+ } else {
+ __ Push(scratch3(), scratch2()); // slot, vector
+ }
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index 04fdff76e1..8c7d4f2241 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -6,39 +6,12 @@
#include "src/codegen.h"
#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreWithVectorDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister(),
- StoreWithVectorDescriptor::ReceiverRegister(),
- StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateMiss");
- StoreIC_PushArgs(masm);
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateSlow");
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
@@ -86,9 +59,7 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
- static_cast<void*>(address), static_cast<void*>(info_address),
- static_cast<void*>(info.SmiCheck()));
+ LOG(isolate, PatchIC(address, info_address, info.SmiCheckDelta()));
}
// Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index f7a1f6982f..6780ac4ca4 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/ic/call-optimization.h"
-
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 16aec0b494..6a9734d5eb 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -283,8 +283,7 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
Handle<Object> smi_handler =
LoadIC::SimpleFieldLoad(isolate(), it->GetFieldIndex());
__ Move(LoadFieldDescriptor::SmiHandlerRegister(), smi_handler);
- LoadFieldStub stub(isolate());
- GenerateTailCall(masm(), stub.GetCode());
+ GenerateTailCall(masm(), isolate()->builtins()->LoadField());
break;
}
case LookupIterator::ACCESSOR:
@@ -353,7 +352,7 @@ Handle<Object> ElementHandlerCompiler::GetKeyedLoadHandler(
}
if (receiver_map->IsStringMap()) {
TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadIndexedStringStub);
- return LoadIndexedStringStub(isolate).GetCode();
+ return isolate->builtins()->KeyedLoadIC_IndexedString();
}
InstanceType instance_type = receiver_map->instance_type();
if (instance_type < FIRST_JS_RECEIVER_TYPE) {
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 65f6fbbef3..a37375abfb 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -158,12 +158,6 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
no_reg);
}
- static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
// These constants describe the structure of the interceptor arguments on the
// stack. The arguments are pushed by the (platform-specific)
// PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 8aa887d2b6..437c5288fb 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -103,7 +103,8 @@ Handle<Object> StoreHandler::StoreField(Isolate* isolate, Kind kind,
}
int value_index = DescriptorArray::ToValueIndex(descriptor);
- DCHECK(kind == kStoreField || kind == kTransitionToField);
+ DCHECK(kind == kStoreField || kind == kTransitionToField ||
+ (kind == kStoreConstField && FLAG_track_constant_fields));
DCHECK_IMPLIES(extend_storage, kind == kTransitionToField);
DCHECK_IMPLIES(field_index.is_inobject(), !extend_storage);
@@ -118,9 +119,12 @@ Handle<Object> StoreHandler::StoreField(Isolate* isolate, Kind kind,
Handle<Object> StoreHandler::StoreField(Isolate* isolate, int descriptor,
FieldIndex field_index,
+ PropertyConstness constness,
Representation representation) {
- return StoreField(isolate, kStoreField, descriptor, field_index,
- representation, false);
+ DCHECK_IMPLIES(!FLAG_track_constant_fields, constness == kMutable);
+ Kind kind = constness == kMutable ? kStoreField : kStoreConstField;
+ return StoreField(isolate, kind, descriptor, field_index, representation,
+ false);
}
Handle<Object> StoreHandler::TransitionToField(Isolate* isolate, int descriptor,
@@ -133,6 +137,7 @@ Handle<Object> StoreHandler::TransitionToField(Isolate* isolate, int descriptor,
Handle<Object> StoreHandler::TransitionToConstant(Isolate* isolate,
int descriptor) {
+ DCHECK(!FLAG_track_constant_fields);
int value_index = DescriptorArray::ToValueIndex(descriptor);
int config =
StoreHandler::KindBits::encode(StoreHandler::kTransitionToConstant) |
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index a5291736dc..539d448008 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -121,8 +121,10 @@ class StoreHandler {
enum Kind {
kStoreElement,
kStoreField,
+ kStoreConstField,
kTransitionToField,
- kTransitionToConstant
+ // TODO(ishell): remove once constant field tracking is done.
+ kTransitionToConstant = kStoreConstField
};
class KindBits : public BitField<Kind, 0, 2> {};
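Assuming default enumerator numbering (an assumption made explicit here, not stated by the patch), the alias keeps the encoding within KindBits' two bits:

//   kStoreElement = 0, kStoreField = 1, kStoreConstField = 2,
//   kTransitionToField = 3, kTransitionToConstant = kStoreConstField = 2
// Four distinct values in total, so BitField<Kind, 0, 2> still suffices.
static_assert(3 <= ((1 << 2) - 1), "all distinct Kind values fit in two bits");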
@@ -175,6 +177,7 @@ class StoreHandler {
// Creates a Smi-handler for storing a field to fast object.
static inline Handle<Object> StoreField(Isolate* isolate, int descriptor,
FieldIndex field_index,
+ PropertyConstness constness,
Representation representation);
// Creates a Smi-handler for transitioning store to a field.
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index b63e82b70a..f0f8faddde 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -122,15 +122,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- // TODO(mvstanton): This isn't used on ia32. Move all the other
- // platform implementations into a code stub so this method can be removed.
- UNREACHABLE();
-}
-
-
// Generate call to api function.
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that should be removed
@@ -302,10 +293,12 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
}
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -313,15 +306,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
__ push(name);
__ push(receiver);
__ push(holder);
-}
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, Runtime::FunctionId id) {
- DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
- Runtime::FunctionForId(id)->nargs);
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallRuntime(id);
}
@@ -538,10 +523,26 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Call the runtime system to load the interceptor.
- __ pop(scratch2()); // save old return address
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
- __ push(scratch2()); // restore old return address
+
+ // Stack:
+ // return address
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ push(receiver());
+ __ push(holder_reg);
+ // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+ if (holder_reg.is(receiver())) {
+ __ push(slot());
+ __ push(vector());
+ } else {
+ __ push(scratch3()); // slot
+ __ push(scratch2()); // vector
+ }
+ __ push(Operand(esp, 4 * kPointerSize)); // return address
+ __ mov(Operand(esp, 5 * kPointerSize), name());
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
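The esp-relative shuffle above is easy to misread, so here is a hypothetical simulation of it against a plain container (`front()` plays the role of `esp[0]`). The final layout is what the tail-called runtime function expects: name deepest (index 0 of the interceptor args), receiver and holder above it, then slot and vector, with the return address back on top:

```cpp
#include <cassert>
#include <cstdint>
#include <deque>

int main() {
  // Model the ia32 stack: front() is esp[0], push() is __ push(...).
  std::deque<std::intptr_t> stack;
  auto push = [&stack](std::intptr_t v) { stack.push_front(v); };

  enum : std::intptr_t { kRet = 1, kReceiver = 2, kHolder = 3,
                         kSlot = 4, kVector = 5, kName = 6 };

  push(kRet);        // incoming state: only the return address
  push(kReceiver);   // __ push(receiver())
  push(kHolder);     // __ push(holder_reg)
  push(kSlot);       // __ push(slot()) or scratch3()
  push(kVector);     // __ push(vector()) or scratch2()
  push(stack[4]);    // __ push(Operand(esp, 4 * kPointerSize)) -- old ret
  stack[5] = kName;  // __ mov(Operand(esp, 5 * kPointerSize), name())

  // Layout seen by Runtime::kLoadPropertyWithInterceptor:
  std::intptr_t expected[] = {kRet, kVector, kSlot, kHolder, kReceiver, kName};
  for (int i = 0; i < 6; i++) assert(stack[i] == expected[i]);
  return 0;
}
```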
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index 4bf0eaee92..c4b4cdcc2b 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -6,54 +6,11 @@
#include "src/codegen.h"
#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
- Register name = StoreWithVectorDescriptor::NameRegister();
-
- STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
- // Current stack layout:
- // - esp[12] -- value
- // - esp[8] -- slot
- // - esp[4] -- vector
- // - esp[0] -- return address
-
- Register return_address = StoreWithVectorDescriptor::SlotRegister();
- __ pop(return_address);
- __ push(receiver);
- __ push(name);
- __ push(return_address);
-}
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
@@ -104,9 +61,7 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- static_cast<void*>(address),
- static_cast<void*>(test_instruction_address), delta);
+ LOG(isolate, PatchIC(address, test_instruction_address, delta));
}
// Patch with a short conditional jump. Enabling means switching from a short
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
deleted file mode 100644
index fcda0c1fa3..0000000000
--- a/deps/v8/src/ic/ic-compiler.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ic/ic-compiler.h"
-
-#include "src/ic/handler-compiler.h"
-#include "src/ic/ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-Handle<Object> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
- Isolate* isolate = receiver_map->GetIsolate();
-
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-
- PropertyICCompiler compiler(isolate);
- Handle<Object> handler =
- compiler.CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
- return handler;
-}
-
-void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
- MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
- List<Handle<Object>>* handlers, KeyedAccessStoreMode store_mode) {
- Isolate* isolate = receiver_maps->at(0)->GetIsolate();
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
- PropertyICCompiler compiler(isolate);
- compiler.CompileKeyedStorePolymorphicHandlers(
- receiver_maps, transitioned_maps, handlers, store_mode);
-}
-
-void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
- MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
- List<Handle<Object>>* handlers, KeyedAccessStoreMode store_mode) {
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map(receiver_maps->at(i));
- Handle<Object> handler;
- Handle<Map> transitioned_map;
- {
- Map* tmap = receiver_map->FindElementsKindTransitionedMap(receiver_maps);
- if (tmap != nullptr) transitioned_map = handle(tmap);
- }
-
- // TODO(mvstanton): The code below is doing pessimistic elements
- // transitions. I would like to stop doing that and rely on Allocation Site
- // Tracking to do a better job of ensuring the data types are what they need
- // to be. Not all the elements are in place yet, pessimistic elements
- // transitions are still important for performance.
- if (!transitioned_map.is_null()) {
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
- TRACE_HANDLER_STATS(isolate(),
- KeyedStoreIC_ElementsTransitionAndStoreStub);
- Handle<Code> stub =
- ElementsTransitionAndStoreStub(isolate(), elements_kind,
- transitioned_map->elements_kind(),
- is_js_array, store_mode)
- .GetCode();
- Handle<Object> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (validity_cell.is_null()) {
- handler = stub;
- } else {
- handler = isolate()->factory()->NewTuple2(validity_cell, stub);
- }
-
- } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
- // TODO(mvstanton): Consider embedding store_mode in the state of the slow
- // keyed store ic for uniformity.
- TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
- handler = isolate()->builtins()->KeyedStoreIC_Slow();
- } else {
- handler = CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
- }
- DCHECK(!handler.is_null());
- handlers->Add(handler);
- transitioned_maps->Add(transitioned_map);
- }
-}
-
-
-#define __ ACCESS_MASM(masm())
-
-Handle<Object> PropertyICCompiler::CompileKeyedStoreMonomorphicHandler(
- Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub;
- if (receiver_map->has_sloppy_arguments_elements()) {
- TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_KeyedStoreSloppyArgumentsStub);
- stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
- } else if (receiver_map->has_fast_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
- TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
- stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
- store_mode).GetCode();
- } else {
- TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
- stub = StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
- }
- Handle<Object> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (validity_cell.is_null()) {
- return stub;
- }
- return isolate()->factory()->NewTuple2(validity_cell, stub);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
deleted file mode 100644
index b8d6635ae0..0000000000
--- a/deps/v8/src/ic/ic-compiler.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IC_IC_COMPILER_H_
-#define V8_IC_IC_COMPILER_H_
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class PropertyICCompiler : public PropertyAccessCompiler {
- public:
- // Keyed
- static Handle<Object> ComputeKeyedStoreMonomorphicHandler(
- Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
- static void ComputeKeyedStorePolymorphicHandlers(
- MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
- List<Handle<Object>>* handlers, KeyedAccessStoreMode store_mode);
-
- private:
- explicit PropertyICCompiler(Isolate* isolate)
- : PropertyAccessCompiler(isolate, Code::KEYED_STORE_IC,
- kCacheOnReceiver) {}
-
- Handle<Object> CompileKeyedStoreMonomorphicHandler(
- Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
- void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps,
- MapHandleList* transitioned_maps,
- List<Handle<Object>>* handlers,
- KeyedAccessStoreMode store_mode);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_IC_IC_COMPILER_H_
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index b286315c01..aacb69091e 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -7,6 +7,7 @@
#include "src/ic/ic.h"
+#include "src/assembler-inl.h"
#include "src/debug/debug.h"
#include "src/macro-assembler.h"
#include "src/prototype.h"
@@ -45,7 +46,10 @@ Code* IC::GetTargetAtAddress(Address address, Address constant_pool) {
// Convert target address to the code object. Code::GetCodeFromTargetAddress
// is safe for use during GC where the map might be marked.
Code* result = Code::GetCodeFromTargetAddress(target);
- DCHECK(result->is_inline_cache_stub());
+ // The result can be an IC dispatcher (for vector-based ICs), an IC handler
+  // (for old-style patching ICs), or CEntryStub (for IC dispatchers inlined
+  // into bytecode handlers).
+ DCHECK(result->is_inline_cache_stub() || result->is_stub());
return result;
}
@@ -54,25 +58,13 @@ void IC::SetTargetAtAddress(Address address, Code* target,
Address constant_pool) {
if (AddressIsDeoptimizedCode(target->GetIsolate(), address)) return;
- DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub());
-
- DCHECK(!target->is_inline_cache_stub() ||
- (target->kind() != Code::LOAD_IC &&
- target->kind() != Code::KEYED_LOAD_IC &&
- target->kind() != Code::CALL_IC && target->kind() != Code::STORE_IC &&
- target->kind() != Code::KEYED_STORE_IC));
+ // Only these three old-style ICs still do code patching.
+ DCHECK(target->is_binary_op_stub() || target->is_compare_ic_stub() ||
+ target->is_to_boolean_ic_stub());
Heap* heap = target->GetHeap();
Code* old_target = GetTargetAtAddress(address, constant_pool);
-#ifdef DEBUG
- // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
- // ICs as language mode. The language mode of the IC must be preserved.
- if (old_target->kind() == Code::STORE_IC ||
- old_target->kind() == Code::KEYED_STORE_IC) {
- DCHECK(StoreICState::GetLanguageMode(old_target->extra_ic_state()) ==
- StoreICState::GetLanguageMode(target->extra_ic_state()));
- }
-#endif
+
Assembler::set_target_address_at(heap->isolate(), address, constant_pool,
target->instruction_start());
if (heap->gc_state() == Heap::MARK_COMPACT) {
@@ -132,14 +124,6 @@ Handle<Map> IC::GetICCacheHolder(Handle<Map> map, Isolate* isolate,
}
-Code* IC::get_host() {
- return isolate()
- ->inner_pointer_to_code_cache()
- ->GetCacheEntry(address())
- ->code;
-}
-
-
bool IC::AddressIsDeoptimizedCode() const {
return AddressIsDeoptimizedCode(isolate(), address());
}
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index 7439ecd2c0..a217b115fd 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -4,7 +4,10 @@
#include "src/ic/ic-state.h"
+#include "src/ast/ast-types.h"
+#include "src/feedback-vector.h"
#include "src/ic/ic.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -16,11 +19,6 @@ void ICUtility::Clear(Isolate* isolate, Address address,
}
-std::ostream& operator<<(std::ostream& os, const CallICState& s) {
- return os << "(" << s.convert_mode() << ", " << s.tail_call_mode() << ")";
-}
-
-
// static
STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::FIRST_TOKEN;
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 836979c4f0..16651c5623 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -11,6 +11,7 @@
namespace v8 {
namespace internal {
+class AstType;
const int kMaxKeyedPolymorphism = 4;
@@ -22,38 +23,6 @@ class ICUtility : public AllStatic {
};
-class CallICState final BASE_EMBEDDED {
- public:
- explicit CallICState(ExtraICState extra_ic_state)
- : bit_field_(extra_ic_state) {}
- CallICState(ConvertReceiverMode convert_mode, TailCallMode tail_call_mode)
- : bit_field_(ConvertModeBits::encode(convert_mode) |
- TailCallModeBits::encode(tail_call_mode)) {}
-
- ExtraICState GetExtraICState() const { return bit_field_; }
-
- static void GenerateAheadOfTime(Isolate*,
- void (*Generate)(Isolate*,
- const CallICState&));
-
- ConvertReceiverMode convert_mode() const {
- return ConvertModeBits::decode(bit_field_);
- }
- TailCallMode tail_call_mode() const {
- return TailCallModeBits::decode(bit_field_);
- }
-
- private:
- typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
- typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
-
- int const bit_field_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const CallICState& s);
-
-
class BinaryOpICState final BASE_EMBEDDED {
public:
BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
@@ -213,67 +182,6 @@ class CompareICState {
Handle<Object> y);
};
-class LoadGlobalICState final BASE_EMBEDDED {
- private:
- class TypeofModeBits : public BitField<TypeofMode, 0, 1> {};
- STATIC_ASSERT(static_cast<int>(INSIDE_TYPEOF) == 0);
- const ExtraICState state_;
-
- public:
- static const uint32_t kNextBitFieldOffset = TypeofModeBits::kNext;
-
- explicit LoadGlobalICState(ExtraICState extra_ic_state)
- : state_(extra_ic_state) {}
-
- explicit LoadGlobalICState(TypeofMode typeof_mode)
- : state_(TypeofModeBits::encode(typeof_mode)) {}
-
- ExtraICState GetExtraICState() const { return state_; }
-
- TypeofMode typeof_mode() const { return TypeofModeBits::decode(state_); }
-
- static TypeofMode GetTypeofMode(ExtraICState state) {
- return LoadGlobalICState(state).typeof_mode();
- }
-
- // For convenience, a statically declared encoding of typeof mode
- // IC state.
- static const ExtraICState kInsideTypeOfState = INSIDE_TYPEOF
- << TypeofModeBits::kShift;
- static const ExtraICState kNotInsideTypeOfState = NOT_INSIDE_TYPEOF
- << TypeofModeBits::kShift;
-};
-
-
-class StoreICState final BASE_EMBEDDED {
- public:
- explicit StoreICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
-
- explicit StoreICState(LanguageMode mode)
- : state_(LanguageModeState::encode(mode)) {}
-
- ExtraICState GetExtraICState() const { return state_; }
-
- LanguageMode language_mode() const {
- return LanguageModeState::decode(state_);
- }
-
- static LanguageMode GetLanguageMode(ExtraICState state) {
- return StoreICState(state).language_mode();
- }
-
- class LanguageModeState : public BitField<LanguageMode, 1, 1> {};
- STATIC_ASSERT(i::LANGUAGE_END == 2);
-
- // For convenience, a statically declared encoding of strict mode extra
- // IC state.
- static const ExtraICState kStrictModeState = STRICT
- << LanguageModeState::kShift;
-
- private:
- const ExtraICState state_;
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index d328b3cb2c..f11f94a770 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -19,7 +19,6 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/handler-configuration-inl.h"
-#include "src/ic/ic-compiler.h"
#include "src/ic/ic-inl.h"
#include "src/ic/ic-stats.h"
#include "src/ic/stub-cache.h"
@@ -66,30 +65,7 @@ const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
return "";
}
-
-#ifdef DEBUG
-
-#define TRACE_GENERIC_IC(isolate, type, reason) \
- do { \
- if (FLAG_trace_ic) { \
- PrintF("[%s patching generic stub in ", type); \
- JavaScriptFrame::PrintTop(isolate, stdout, false, true); \
- PrintF(" (%s)]\n", reason); \
- } \
- } while (false)
-
-#else
-
-#define TRACE_GENERIC_IC(isolate, type, reason) \
- do { \
- if (FLAG_trace_ic) { \
- PrintF("[%s patching generic stub in ", type); \
- PrintF("(see below) (%s)]\n", reason); \
- } \
- } while (false)
-
-#endif // DEBUG
-
+#define TRACE_GENERIC_IC(reason) set_slow_stub_reason(reason);
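The replacement macro changes the tracing model: a miss reason is no longer printed at the call site but recorded on the IC and emitted later together with the rest of the trace event. A hypothetical toy version of that flow (names are stand-ins, not the V8 API):

```cpp
#include <cstdio>

class IC {
 public:
  // What TRACE_GENERIC_IC(reason) now expands to inside IC methods.
  void set_slow_stub_reason(const char* reason) { slow_stub_reason_ = reason; }

  // Later, the TraceIC/ICEvent path picks the stored reason up exactly once.
  void TraceIC(const char* type) const {
    std::printf("[%s slow_reason=%s]\n", type,
                slow_stub_reason_ ? slow_stub_reason_ : "-");
  }

 private:
  const char* slow_stub_reason_ = nullptr;
};

int main() {
  IC ic;
  ic.set_slow_stub_reason("incompatible receiver type");
  ic.TraceIC("StoreIC");  // prints: [StoreIC slow_reason=incompatible receiver type]
  return 0;
}
```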
void IC::TraceIC(const char* type, Handle<Object> name) {
if (FLAG_ic_stats) {
@@ -100,107 +76,110 @@ void IC::TraceIC(const char* type, Handle<Object> name) {
}
}
+Address IC::GetAbstractPC(int* line, int* column) const {
+ JavaScriptFrameIterator it(isolate());
-void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
- State new_state) {
- if (V8_LIKELY(!FLAG_ic_stats)) return;
+ JavaScriptFrame* frame = it.frame();
+ DCHECK(!frame->is_builtin());
+ int position = frame->position();
- if (FLAG_ic_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
- ICStats::instance()->Begin();
- ICInfo& ic_info = ICStats::instance()->Current();
- ic_info.type = is_keyed() ? "Keyed" : "";
- ic_info.type += type;
+ Object* maybe_script = frame->function()->shared()->script();
+ if (maybe_script->IsScript()) {
+ Handle<Script> script(Script::cast(maybe_script), isolate());
+ Script::PositionInfo info;
+ Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
+ *line = info.line + 1;
+ *column = info.column + 1;
} else {
- PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
+ *line = position;
+ *column = -1;
}
- // TODO(jkummerow): Add support for "apply". The logic is roughly:
- // marker = [fp_ + kMarkerOffset];
- // if marker is smi and marker.value == INTERNAL and
- // the frame's code == builtin(Builtins::kFunctionApply):
- // then print "apply from" and advance one frame
+ if (frame->is_interpreted()) {
+ InterpretedFrame* iframe = static_cast<InterpretedFrame*>(frame);
+ Address bytecode_start =
+ reinterpret_cast<Address>(iframe->GetBytecodeArray()) - kHeapObjectTag +
+ BytecodeArray::kHeaderSize;
+ return bytecode_start + iframe->GetBytecodeOffset();
+ }
- Object* maybe_function =
- Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
- if (maybe_function->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(maybe_function);
- int code_offset = 0;
- if (function->IsInterpreted()) {
- code_offset = InterpretedFrame::GetBytecodeOffset(fp());
- } else {
- code_offset =
- static_cast<int>(pc() - function->code()->instruction_start());
- }
- if (FLAG_ic_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
- JavaScriptFrame::CollectFunctionAndOffsetForICStats(
- function, function->abstract_code(), code_offset);
- } else {
- JavaScriptFrame::PrintFunctionAndOffset(
- function, function->abstract_code(), code_offset, stdout, true);
- }
+ return frame->pc();
+}
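The bytecode-address arithmetic above relies on V8's heap-object tagging: object "pointers" carry kHeapObjectTag (1) in the low bit, so subtracting the tag and adding the header size lands on the first bytecode. A small sketch of just that arithmetic, with the header size as a stand-in rather than the real BytecodeArray::kHeaderSize:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const std::uintptr_t kHeapObjectTag = 1;  // as in V8
  const std::uintptr_t kHeaderSize = 16;    // stand-in for BytecodeArray::kHeaderSize

  alignas(8) static unsigned char object[64];    // pretend heap object
  std::uintptr_t raw = reinterpret_cast<std::uintptr_t>(object);
  std::uintptr_t tagged = raw + kHeapObjectTag;  // how the heap hands objects out

  // reinterpret_cast<Address>(array) - kHeapObjectTag + kHeaderSize:
  std::uintptr_t bytecode_start = tagged - kHeapObjectTag + kHeaderSize;
  assert(bytecode_start == raw + kHeaderSize);
  return 0;
}
```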
+
+void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
+ State new_state) {
+ if (V8_LIKELY(!FLAG_ic_stats)) return;
+
+ Map* map = nullptr;
+ if (!receiver_map().is_null()) {
+ map = *receiver_map();
}
const char* modifier = "";
- if (kind() == Code::KEYED_STORE_IC) {
+ if (IsKeyedStoreIC()) {
KeyedAccessStoreMode mode =
casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
modifier = GetTransitionMarkModifier(mode);
}
- Map* map = nullptr;
- if (!receiver_map().is_null()) {
- map = *receiver_map();
+
+ if (!(FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ int line;
+ int column;
+ Address pc = GetAbstractPC(&line, &column);
+ LOG(isolate(), ICEvent(type, is_keyed(), pc, line, column, map, *name,
+ TransitionMarkFromState(old_state),
+ TransitionMarkFromState(new_state), modifier,
+ slow_stub_reason_));
+ return;
}
- if (FLAG_ic_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
- ICInfo& ic_info = ICStats::instance()->Current();
- // Reverse enough space for IC transition state, the longest length is 17.
- ic_info.state.reserve(17);
- ic_info.state = "(";
- ic_info.state += TransitionMarkFromState(old_state);
- ic_info.state += "->";
- ic_info.state += TransitionMarkFromState(new_state);
- ic_info.state += modifier;
- ic_info.state += ")";
- ic_info.map = reinterpret_cast<void*>(map);
+
+ ICStats::instance()->Begin();
+ ICInfo& ic_info = ICStats::instance()->Current();
+ ic_info.type = is_keyed() ? "Keyed" : "";
+ ic_info.type += type;
+
+ Object* maybe_function =
+ Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
+ DCHECK(maybe_function->IsJSFunction());
+ JSFunction* function = JSFunction::cast(maybe_function);
+ int code_offset = 0;
+ if (function->IsInterpreted()) {
+ code_offset = InterpretedFrame::GetBytecodeOffset(fp());
} else {
- PrintF(" (%c->%c%s) map=(%p", TransitionMarkFromState(old_state),
- TransitionMarkFromState(new_state), modifier,
- reinterpret_cast<void*>(map));
- }
+ code_offset =
+ static_cast<int>(pc() - function->code()->instruction_start());
+ }
+ JavaScriptFrame::CollectFunctionAndOffsetForICStats(
+ function, function->abstract_code(), code_offset);
+
+  // Reserve enough space for the IC transition state; the longest length is 17.
+ ic_info.state.reserve(17);
+ ic_info.state = "(";
+ ic_info.state += TransitionMarkFromState(old_state);
+ ic_info.state += "->";
+ ic_info.state += TransitionMarkFromState(new_state);
+ ic_info.state += modifier;
+ ic_info.state += ")";
+ ic_info.map = reinterpret_cast<void*>(map);
if (map != nullptr) {
- if (FLAG_ic_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
- ICInfo& ic_info = ICStats::instance()->Current();
- ic_info.is_dictionary_map = map->is_dictionary_map();
- ic_info.number_of_own_descriptors = map->NumberOfOwnDescriptors();
- ic_info.instance_type = std::to_string(map->instance_type());
- } else {
- PrintF(" dict=%u own=%u type=", map->is_dictionary_map(),
- map->NumberOfOwnDescriptors());
- std::cout << map->instance_type();
- }
- }
- if (FLAG_ic_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
- // TODO(lpy) Add name as key field in ICStats.
- ICStats::instance()->End();
- } else {
- PrintF(") ");
- name->ShortPrint(stdout);
- PrintF("]\n");
+ ic_info.is_dictionary_map = map->is_dictionary_map();
+ ic_info.number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ ic_info.instance_type = std::to_string(map->instance_type());
}
+  // TODO(lpy): Add name as key field in ICStats.
+ ICStats::instance()->End();
}
#define TRACE_IC(type, name) TraceIC(type, name)
-
IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
: isolate_(isolate),
vector_set_(false),
+ kind_(FeedbackSlotKind::kInvalid),
target_maps_set_(false),
+ slow_stub_reason_(nullptr),
nexus_(nexus) {
// To improve the performance of the (much used) IC code, we unfold a few
// levels of the stack frame iteration code. This yields a ~35% speedup when
@@ -237,9 +216,9 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
// function's frame. Check if there is an additional frame, and if there
// is, skip this frame. However, the pc should not be updated. The call to
// ICs happen from bytecode handlers.
- Object* frame_type =
- Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
- if (frame_type == Smi::FromInt(StackFrame::STUB)) {
+ intptr_t frame_marker =
+ Memory::intptr_at(fp + TypedFrameConstants::kFrameTypeOffset);
+ if (frame_marker == StackFrame::TypeToMarker(StackFrame::STUB)) {
fp = Memory::Address_at(fp + TypedFrameConstants::kCallerFPOffset);
}
fp_ = fp;
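The old check compared the frame-type slot against a Smi; the new one reads it as a raw word and compares against StackFrame::TypeToMarker. A hedged sketch of that encoding, assuming the low-bit-set marker scheme V8 used at the time, (type << 1) | 1, with illustrative enum values only:

```cpp
#include <cassert>
#include <cstdint>

enum Type { JAVA_SCRIPT = 0, STUB = 1 /* ... */ };  // illustrative values only

// Assumed encoding: shifted type with the low bit set, so a marker can never
// be confused with an aligned heap pointer stored in the same slot.
std::intptr_t TypeToMarker(Type type) {
  return (static_cast<std::intptr_t>(type) << 1) | 1;
}
Type MarkerToType(std::intptr_t marker) { return static_cast<Type>(marker >> 1); }

int main() {
  std::intptr_t frame_marker = TypeToMarker(STUB);  // what the prologue stores
  assert(frame_marker == TypeToMarker(STUB));       // the comparison in IC::IC
  assert(MarkerToType(frame_marker) == STUB);
  return 0;
}
```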
@@ -247,18 +226,36 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
constant_pool_address_ = constant_pool;
}
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
- Code* target = this->target();
- kind_ = target->kind();
- state_ = UseVector() ? nexus->StateFromFeedback() : StateFromCode(target);
+ if (nexus) {
+ kind_ = nexus->kind();
+ DCHECK(UseVector());
+ state_ = nexus->StateFromFeedback();
+ extra_ic_state_ = kNoExtraICState;
+ } else {
+ Code* target = this->target();
+ Code::Kind kind = target->kind();
+ if (kind == Code::BINARY_OP_IC) {
+ kind_ = FeedbackSlotKind::kBinaryOp;
+ } else if (kind == Code::COMPARE_IC) {
+ kind_ = FeedbackSlotKind::kCompareOp;
+ } else if (kind == Code::TO_BOOLEAN_IC) {
+ kind_ = FeedbackSlotKind::kToBoolean;
+ } else {
+ UNREACHABLE();
+ kind_ = FeedbackSlotKind::kInvalid;
+ }
+ DCHECK(!UseVector());
+ state_ = StateFromCode(target);
+ extra_ic_state_ = target->extra_ic_state();
+ }
old_state_ = state_;
- extra_ic_state_ = target->extra_ic_state();
}
// The ICs that don't pass slot and vector through the stack have to
// save/restore them in the dispatcher.
bool IC::ShouldPushPopSlotAndVector(Code::Kind kind) {
if (kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
- kind == Code::KEYED_LOAD_IC || kind == Code::CALL_IC) {
+ kind == Code::KEYED_LOAD_IC) {
return true;
}
if (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC) {
@@ -289,7 +286,7 @@ InlineCacheState IC::StateFromCode(Code* code) {
}
}
-SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
+JSFunction* IC::GetHostFunction() const {
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
// corresponding to the frame.
@@ -298,16 +295,7 @@ SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
// Find the function on the stack and both the active code for the
// function and the original code.
- JSFunction* function = frame->function();
- return function->shared();
-}
-
-
-Code* IC::GetCode() const {
- HandleScope scope(isolate());
- Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
- Code* code = shared->code();
- return code;
+ return frame->function();
}
static void LookupForRead(LookupIterator* it) {
@@ -350,7 +338,7 @@ bool IC::ShouldRecomputeHandler(Handle<String> name) {
// This is a contextual access, always just update the handler and stay
// monomorphic.
- if (kind() == Code::LOAD_GLOBAL_IC) return true;
+ if (IsLoadGlobalIC()) return true;
// The current map wasn't handled yet. There's no reason to stay monomorphic,
// *unless* we're moving from a deprecated map to its replacement, or
@@ -447,12 +435,14 @@ static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
}
// static
-void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host) {
- if (host->kind() != Code::FUNCTION) return;
+void IC::OnFeedbackChanged(Isolate* isolate, JSFunction* host_function) {
+ Code* host = host_function->shared()->code();
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_own_type_change_checksum();
- host->set_profiler_ticks(0);
+ if (host->kind() == Code::FUNCTION) {
+ TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
+ info->change_own_type_change_checksum();
+ host->set_profiler_ticks(0);
+ }
isolate->runtime_profiler()->NotifyICChanged();
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
@@ -462,6 +452,7 @@ void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host) {
void IC::PostPatching(Address address, Code* target, Code* old_target) {
// Type vector based ICs update these statistics at a different time because
// they don't always patch on state change.
+ // TODO(ishell): DCHECK
if (ICUseVector(target->kind())) return;
DCHECK(old_target->is_inline_cache_stub());
@@ -507,58 +498,6 @@ void IC::Clear(Isolate* isolate, Address address, Address constant_pool) {
}
}
-
-void KeyedLoadIC::Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus) {
- if (IsCleared(nexus)) return;
- // Make sure to also clear the map used in inline fast cases. If we
- // do not clear these maps, cached code can keep objects alive
- // through the embedded maps.
- nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host);
-}
-
-
-void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
- // Determine our state.
- Object* feedback = nexus->vector()->Get(nexus->slot());
- State state = nexus->StateFromFeedback();
-
- if (state != UNINITIALIZED && !feedback->IsAllocationSite()) {
- nexus->ConfigureUninitialized();
- // The change in state must be processed.
- OnTypeFeedbackChanged(isolate, host);
- }
-}
-
-
-void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) {
- if (IsCleared(nexus)) return;
- nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host);
-}
-
-void LoadGlobalIC::Clear(Isolate* isolate, Code* host,
- LoadGlobalICNexus* nexus) {
- if (IsCleared(nexus)) return;
- nexus->ConfigureUninitialized();
- OnTypeFeedbackChanged(isolate, host);
-}
-
-void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) {
- if (IsCleared(nexus)) return;
- nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host);
-}
-
-
-void KeyedStoreIC::Clear(Isolate* isolate, Code* host,
- KeyedStoreICNexus* nexus) {
- if (IsCleared(nexus)) return;
- nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host);
-}
-
-
void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
Address constant_pool) {
DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC);
@@ -583,13 +522,13 @@ void IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
if (new_state == PREMONOMORPHIC) {
nexus()->ConfigurePremonomorphic();
} else if (new_state == MEGAMORPHIC) {
- if (kind() == Code::LOAD_IC || kind() == Code::STORE_IC) {
+ if (IsLoadIC() || IsStoreIC() || IsStoreOwnIC()) {
nexus()->ConfigureMegamorphic();
- } else if (kind() == Code::KEYED_LOAD_IC) {
+ } else if (IsKeyedLoadIC()) {
KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
nexus->ConfigureMegamorphicKeyed(key->IsName() ? PROPERTY : ELEMENT);
} else {
- DCHECK(kind() == Code::KEYED_STORE_IC);
+ DCHECK(IsKeyedStoreIC());
KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
nexus->ConfigureMegamorphicKeyed(key->IsName() ? PROPERTY : ELEMENT);
}
@@ -598,66 +537,117 @@ void IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
}
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host());
+ OnFeedbackChanged(isolate(), GetHostFunction());
}
void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Object> handler) {
DCHECK(UseVector());
- if (kind() == Code::LOAD_IC) {
- LoadICNexus* nexus = casted_nexus<LoadICNexus>();
- nexus->ConfigureMonomorphic(map, handler);
- } else if (kind() == Code::LOAD_GLOBAL_IC) {
- LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
- nexus->ConfigureHandlerMode(handler);
- } else if (kind() == Code::KEYED_LOAD_IC) {
- KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
- nexus->ConfigureMonomorphic(name, map, handler);
- } else if (kind() == Code::STORE_IC) {
- StoreICNexus* nexus = casted_nexus<StoreICNexus>();
- nexus->ConfigureMonomorphic(map, handler);
- } else {
- DCHECK(kind() == Code::KEYED_STORE_IC);
- KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
- nexus->ConfigureMonomorphic(name, map, handler);
+ switch (kind_) {
+ case FeedbackSlotKind::kLoadProperty: {
+ LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+ nexus->ConfigureMonomorphic(map, handler);
+ break;
+ }
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
+ LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
+ nexus->ConfigureHandlerMode(handler);
+ break;
+ }
+ case FeedbackSlotKind::kLoadKeyed: {
+ KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+ nexus->ConfigureMonomorphic(name, map, handler);
+ break;
+ }
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed: {
+ StoreICNexus* nexus = casted_nexus<StoreICNexus>();
+ nexus->ConfigureMonomorphic(map, handler);
+ break;
+ }
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict: {
+ KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+ nexus->ConfigureMonomorphic(name, map, handler);
+ break;
+ }
+ case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kBinaryOp:
+ case FeedbackSlotKind::kCompareOp:
+ case FeedbackSlotKind::kToBoolean:
+ case FeedbackSlotKind::kCreateClosure:
+ case FeedbackSlotKind::kLiteral:
+ case FeedbackSlotKind::kGeneral:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ break;
}
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host());
+ OnFeedbackChanged(isolate(), GetHostFunction());
}
void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
List<Handle<Object>>* handlers) {
DCHECK(UseVector());
- if (kind() == Code::LOAD_IC) {
- LoadICNexus* nexus = casted_nexus<LoadICNexus>();
- nexus->ConfigurePolymorphic(maps, handlers);
- } else if (kind() == Code::KEYED_LOAD_IC) {
- KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
- nexus->ConfigurePolymorphic(name, maps, handlers);
- } else if (kind() == Code::STORE_IC) {
- StoreICNexus* nexus = casted_nexus<StoreICNexus>();
- nexus->ConfigurePolymorphic(maps, handlers);
- } else {
- DCHECK(kind() == Code::KEYED_STORE_IC);
- KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
- nexus->ConfigurePolymorphic(name, maps, handlers);
+ switch (kind_) {
+ case FeedbackSlotKind::kLoadProperty: {
+ LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+ nexus->ConfigurePolymorphic(maps, handlers);
+ break;
+ }
+ case FeedbackSlotKind::kLoadKeyed: {
+ KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+ nexus->ConfigurePolymorphic(name, maps, handlers);
+ break;
+ }
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed: {
+ StoreICNexus* nexus = casted_nexus<StoreICNexus>();
+ nexus->ConfigurePolymorphic(maps, handlers);
+ break;
+ }
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict: {
+ KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+ nexus->ConfigurePolymorphic(name, maps, handlers);
+ break;
+ }
+ case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kBinaryOp:
+ case FeedbackSlotKind::kCompareOp:
+ case FeedbackSlotKind::kToBoolean:
+ case FeedbackSlotKind::kCreateClosure:
+ case FeedbackSlotKind::kLiteral:
+ case FeedbackSlotKind::kGeneral:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ break;
}
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host());
+ OnFeedbackChanged(isolate(), GetHostFunction());
}
void IC::ConfigureVectorState(MapHandleList* maps,
MapHandleList* transitioned_maps,
List<Handle<Object>>* handlers) {
DCHECK(UseVector());
- DCHECK(kind() == Code::KEYED_STORE_IC);
+ DCHECK(IsKeyedStoreIC());
KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
nexus->ConfigurePolymorphic(maps, transitioned_maps, handlers);
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host());
+ OnFeedbackChanged(isolate(), GetHostFunction());
}
@@ -843,12 +833,8 @@ bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
DCHECK(IsHandler(*handler));
- // Currently only LoadIC and KeyedLoadIC support non-code handlers.
- DCHECK_IMPLIES(!handler->IsCode(), kind() == Code::LOAD_IC ||
- kind() == Code::LOAD_GLOBAL_IC ||
- kind() == Code::KEYED_LOAD_IC ||
- kind() == Code::STORE_IC ||
- kind() == Code::KEYED_STORE_IC);
+ // Currently only load and store ICs support non-code handlers.
+ DCHECK_IMPLIES(!handler->IsCode(), IsAnyLoad() || IsAnyStore());
switch (state()) {
case UNINITIALIZED:
case PREMONOMORPHIC:
@@ -856,7 +842,7 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
break;
case RECOMPUTE_HANDLER:
case MONOMORPHIC:
- if (kind() == Code::LOAD_GLOBAL_IC) {
+ if (IsLoadGlobalIC()) {
UpdateMonomorphicIC(handler, name);
break;
}
@@ -1101,7 +1087,7 @@ bool IsCompatibleReceiver(LookupIterator* lookup, Handle<Map> receiver_map) {
void LoadIC::UpdateCaches(LookupIterator* lookup) {
- if (state() == UNINITIALIZED && kind() != Code::LOAD_GLOBAL_IC) {
+ if (state() == UNINITIALIZED && !IsLoadGlobalIC()) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
@@ -1115,15 +1101,10 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
lookup->state() == LookupIterator::ACCESS_CHECK) {
code = slow_stub();
} else if (!lookup->IsFound()) {
- if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
- code = LoadNonExistent(receiver_map(), lookup->name());
- } else {
- code = slow_stub();
- }
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
+ code = LoadNonExistent(receiver_map(), lookup->name());
} else {
- if (kind() == Code::LOAD_GLOBAL_IC &&
- lookup->state() == LookupIterator::DATA &&
+ if (IsLoadGlobalIC() && lookup->state() == LookupIterator::DATA &&
lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
// Now update the cell in the feedback vector.
@@ -1133,7 +1114,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
return;
} else if (lookup->state() == LookupIterator::ACCESSOR) {
if (!IsCompatibleReceiver(lookup, receiver_map())) {
- TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+ TRACE_GENERIC_IC("incompatible receiver type");
code = slow_stub();
}
} else if (lookup->state() == LookupIterator::INTERCEPTOR) {
@@ -1144,7 +1125,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
LookupForRead(&it);
if (it.state() == LookupIterator::ACCESSOR &&
!IsCompatibleReceiver(&it, receiver_map())) {
- TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+ TRACE_GENERIC_IC("incompatible receiver type");
code = slow_stub();
}
}
@@ -1156,20 +1137,12 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
}
StubCache* IC::stub_cache() {
- switch (kind()) {
- case Code::LOAD_IC:
- case Code::KEYED_LOAD_IC:
- return isolate()->load_stub_cache();
-
- case Code::STORE_IC:
- case Code::KEYED_STORE_IC:
- return isolate()->store_stub_cache();
-
- default:
- break;
+ if (IsAnyLoad()) {
+ return isolate()->load_stub_cache();
+ } else {
+ DCHECK(IsAnyStore());
+ return isolate()->store_stub_cache();
}
- UNREACHABLE();
- return nullptr;
}
void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* handler) {
@@ -1179,8 +1152,7 @@ void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* handler) {
void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
if (!FLAG_runtime_call_stats) return;
- if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
- kind() == Code::KEYED_LOAD_IC) {
+ if (IsAnyLoad()) {
switch (lookup->state()) {
case LookupIterator::ACCESS_CHECK:
TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_AccessCheck);
@@ -1207,7 +1179,7 @@ void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Transition);
break;
}
- } else if (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC) {
+ } else if (IsAnyStore()) {
switch (lookup->state()) {
case LookupIterator::ACCESS_CHECK:
TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_AccessCheck);
@@ -1254,19 +1226,18 @@ Handle<Object> IC::ComputeHandler(LookupIterator* lookup,
lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
CacheHolderFlag flag;
Handle<Map> stub_holder_map;
- if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
- kind() == Code::KEYED_LOAD_IC) {
+ if (IsAnyLoad()) {
stub_holder_map = IC::GetHandlerCacheHolder(
receiver_map(), receiver_is_holder, isolate(), &flag);
} else {
- DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
+ DCHECK(IsAnyStore());
// Store handlers cannot be cached on prototypes.
flag = kCacheOnReceiver;
stub_holder_map = receiver_map();
}
Handle<Object> handler = PropertyHandlerCompiler::Find(
- lookup->name(), stub_holder_map, kind(), flag);
+ lookup->name(), stub_holder_map, handler_kind(), flag);
// Use the cached value if it exists, and if it is different from the
// handler that just missed.
if (!handler.is_null()) {
@@ -1329,8 +1300,7 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
->has_non_instance_prototype()) {
Handle<Code> stub;
TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
- FunctionPrototypeStub function_prototype_stub(isolate());
- return function_prototype_stub.GetCode();
+ return isolate()->builtins()->LoadIC_FunctionPrototype();
}
Handle<Map> map = receiver_map();
@@ -1359,7 +1329,7 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
return slow_stub();
}
// When debugging we need to go the slow path to flood the accessor.
- if (GetSharedFunctionInfo()->HasDebugInfo()) {
+ if (GetHostFunction()->shared()->HasDebugInfo()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
}
@@ -1384,7 +1354,7 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
return smi_handler;
}
- if (kind() != Code::LOAD_GLOBAL_IC) {
+ if (!IsLoadGlobalIC()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterFromPrototypeDH);
return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
}
@@ -1398,7 +1368,7 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
case LookupIterator::DATA: {
DCHECK_EQ(kData, lookup->property_details().kind());
if (lookup->is_dictionary_holder()) {
- if (kind() != Code::LOAD_IC && kind() != Code::LOAD_GLOBAL_IC) {
+ if (!IsLoadIC() && !IsLoadGlobalIC()) { // IsKeyedLoadIC()?
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
}
@@ -1505,7 +1475,7 @@ Handle<Object> LoadIC::CompileHandler(LookupIterator* lookup,
return ComputeHandler(lookup);
}
DCHECK(holder->HasFastProperties());
- DCHECK(!GetSharedFunctionInfo()->HasDebugInfo());
+ DCHECK(!GetHostFunction()->shared()->HasDebugInfo());
Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
isolate());
CallOptimization call_optimization(getter);
@@ -1542,7 +1512,7 @@ Handle<Object> LoadIC::CompileHandler(LookupIterator* lookup,
case LookupIterator::DATA: {
DCHECK(lookup->is_dictionary_holder());
- DCHECK(kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC);
+ DCHECK(IsLoadIC() || IsLoadGlobalIC());
DCHECK(holder->IsJSGlobalObject());
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobal);
NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
@@ -1579,6 +1549,8 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
}
} else if (key->IsUndefined(isolate)) {
key = isolate->factory()->undefined_string();
+ } else if (key->IsString()) {
+ key = isolate->factory()->InternalizeString(Handle<String>::cast(key));
}
return key;
}
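The new IsString branch internalizes keyed-access keys up front. Internalization means each distinct string content has one canonical object, so later name lookups can compare by identity instead of by content; a hypothetical model with an interning table:

```cpp
#include <cassert>
#include <string>
#include <unordered_set>

// Stand-in for the isolate's string table: one canonical copy per content.
const std::string* Internalize(const std::string& s) {
  static std::unordered_set<std::string> table;
  return &*table.insert(s).first;  // element pointers stay stable on rehash
}

int main() {
  const std::string* a = Internalize("length");
  const std::string* b = Internalize(std::string("len") + "gth");
  assert(a == b);  // same pointer: name comparisons become identity checks
  return 0;
}
```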
@@ -1600,11 +1572,11 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
Handle<Map> map = target_receiver_maps.at(i);
if (map.is_null()) continue;
if (map->instance_type() == JS_VALUE_TYPE) {
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSValue");
+ TRACE_GENERIC_IC("JSValue");
return;
}
if (map->instance_type() == JS_PROXY_TYPE) {
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSProxy");
+ TRACE_GENERIC_IC("JSProxy");
return;
}
}
@@ -1632,14 +1604,14 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
+ TRACE_GENERIC_IC("same map added twice");
return;
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
+ TRACE_GENERIC_IC("max polymorph exceeded");
return;
}
@@ -1686,7 +1658,6 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
if (!is_vector_set()) {
ConfigureVectorState(MEGAMORPHIC, key);
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
TRACE_IC("LoadIC", key);
}
@@ -1852,12 +1823,13 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
return;
}
- bool use_ic = LookupForWrite(lookup, value, store_mode);
- if (!use_ic) {
- TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
+ Handle<Object> handler;
+ if (LookupForWrite(lookup, value, store_mode)) {
+ handler = ComputeHandler(lookup, value);
+ } else {
+ TRACE_GENERIC_IC("LookupForWrite said 'false'");
+ handler = slow_stub();
}
- Handle<Object> handler = use_ic ? ComputeHandler(lookup, value)
- : Handle<Object>::cast(slow_stub());
PatchCache(lookup->name(), handler);
TRACE_IC("StoreIC", lookup->name());
@@ -1955,7 +1927,7 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
// Currently not handled by CompileStoreTransition.
if (!holder->HasFastProperties()) {
- TRACE_GENERIC_IC(isolate(), "StoreIC", "transition from slow");
+ TRACE_GENERIC_IC("transition from slow");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
@@ -1975,7 +1947,7 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
case LookupIterator::ACCESSOR: {
if (!holder->HasFastProperties()) {
- TRACE_GENERIC_IC(isolate(), "StoreIC", "accessor on slow map");
+ TRACE_GENERIC_IC("accessor on slow map");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
@@ -1983,20 +1955,19 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
if (accessors->IsAccessorInfo()) {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
if (v8::ToCData<Address>(info->setter()) == nullptr) {
- TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == nullptr");
+ TRACE_GENERIC_IC("setter == nullptr");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
if (AccessorInfo::cast(*accessors)->is_special_data_property() &&
!lookup->HolderIsReceiverOrHiddenPrototype()) {
- TRACE_GENERIC_IC(isolate(), "StoreIC",
- "special data property in prototype chain");
+ TRACE_GENERIC_IC("special data property in prototype chain");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
receiver_map())) {
- TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
+ TRACE_GENERIC_IC("incompatible receiver type");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
@@ -2009,7 +1980,7 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
isolate());
if (!setter->IsJSFunction() && !setter->IsFunctionTemplateInfo()) {
- TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
+ TRACE_GENERIC_IC("setter not a function");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
@@ -2018,7 +1989,7 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
break; // Custom-compiled handler.
}
- TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver");
+ TRACE_GENERIC_IC("incompatible receiver");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
@@ -2045,12 +2016,13 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
int descriptor = lookup->GetFieldDescriptorIndex();
FieldIndex index = lookup->GetFieldIndex();
return StoreHandler::StoreField(isolate(), descriptor, index,
+ lookup->constness(),
lookup->representation());
}
// -------------- Constant properties --------------
DCHECK_EQ(kDescriptor, lookup->property_details().location());
- TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
+ TRACE_GENERIC_IC("constant property");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
@@ -2106,6 +2078,11 @@ Handle<Object> StoreIC::CompileHandler(LookupIterator* lookup,
DCHECK(!info->is_sloppy() || receiver->IsJSReceiver());
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreCallback);
NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
+ // TODO(ishell): don't hard-code language mode into the handler because
+  // this handler can be re-used through the megamorphic stub cache with the
+  // wrong language mode.
+  // Better to pass vector/slot to Runtime::kStoreCallbackProperty and
+ // let it decode the language mode from the IC kind.
Handle<Code> code = compiler.CompileStoreCallback(
receiver, lookup->name(), info, language_mode());
return code;
@@ -2167,16 +2144,14 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
Handle<Map> monomorphic_map =
ComputeTransitionedMap(receiver_map, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
- Handle<Object> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(monomorphic_map,
- store_mode);
+ Handle<Object> handler = StoreElementHandler(monomorphic_map, store_mode);
return ConfigureVectorState(Handle<Name>(), monomorphic_map, handler);
}
for (int i = 0; i < target_receiver_maps.length(); i++) {
if (!target_receiver_maps.at(i).is_null() &&
target_receiver_maps.at(i)->instance_type() == JS_VALUE_TYPE) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "JSValue");
+ TRACE_GENERIC_IC("JSValue");
return;
}
}
@@ -2202,8 +2177,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
store_mode = GetNonTransitioningStoreMode(store_mode);
Handle<Object> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- transitioned_receiver_map, store_mode);
+ StoreElementHandler(transitioned_receiver_map, store_mode);
ConfigureVectorState(Handle<Name>(), transitioned_receiver_map, handler);
return;
}
@@ -2215,9 +2189,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// A "normal" IC that handles stores can switch to a version that can
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
- Handle<Object> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(receiver_map,
- store_mode);
+ Handle<Object> handler = StoreElementHandler(receiver_map, store_mode);
return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
}
}
@@ -2237,7 +2209,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
if (!map_added) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the megamorphic stub which can handle everything.
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "same map added twice");
+ TRACE_GENERIC_IC("same map added twice");
return;
}
@@ -2252,7 +2224,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
if (store_mode == STANDARD_STORE) {
store_mode = old_store_mode;
} else if (store_mode != old_store_mode) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "store mode mismatch");
+ TRACE_GENERIC_IC("store mode mismatch");
return;
}
}
@@ -2269,16 +2241,15 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
if (external_arrays != 0 &&
external_arrays != target_receiver_maps.length()) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- "unsupported combination of external and normal arrays");
+ TRACE_GENERIC_IC("unsupported combination of external and normal arrays");
return;
}
}
MapHandleList transitioned_maps(target_receiver_maps.length());
List<Handle<Object>> handlers(target_receiver_maps.length());
- PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
- &target_receiver_maps, &transitioned_maps, &handlers, store_mode);
+ StoreElementPolymorphicHandlers(&target_receiver_maps, &transitioned_maps,
+ &handlers, store_mode);
ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
}
@@ -2312,6 +2283,91 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
return MaybeHandle<Map>().ToHandleChecked();
}
+Handle<Object> KeyedStoreIC::StoreElementHandler(
+ Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
+ DCHECK(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ Handle<Code> stub;
+ if (receiver_map->has_sloppy_arguments_elements()) {
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_KeyedStoreSloppyArgumentsStub);
+ stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
+ } else if (receiver_map->has_fast_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
+ stub =
+ StoreFastElementStub(isolate(), is_jsarray, elements_kind, store_mode)
+ .GetCode();
+ } else {
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
+ DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind);
+ stub = StoreSlowElementStub(isolate(), store_mode).GetCode();
+ }
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (validity_cell.is_null()) {
+ return stub;
+ }
+ return isolate()->factory()->NewTuple2(validity_cell, stub);
+}
+
+void KeyedStoreIC::StoreElementPolymorphicHandlers(
+ MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
+ List<Handle<Object>>* handlers, KeyedAccessStoreMode store_mode) {
+ DCHECK(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map(receiver_maps->at(i));
+ Handle<Object> handler;
+ Handle<Map> transitioned_map;
+ {
+ Map* tmap = receiver_map->FindElementsKindTransitionedMap(receiver_maps);
+ if (tmap != nullptr) transitioned_map = handle(tmap);
+ }
+
+ // TODO(mvstanton): The code below is doing pessimistic elements
+ // transitions. I would like to stop doing that and rely on Allocation Site
+ // Tracking to do a better job of ensuring the data types are what they need
+    // to be. Not all the elements are in place yet, so pessimistic elements
+    // transitions are still important for performance.
+ if (!transitioned_map.is_null()) {
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ TRACE_HANDLER_STATS(isolate(),
+ KeyedStoreIC_ElementsTransitionAndStoreStub);
+ Handle<Code> stub =
+ ElementsTransitionAndStoreStub(isolate(), elements_kind,
+ transitioned_map->elements_kind(),
+ is_js_array, store_mode)
+ .GetCode();
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (validity_cell.is_null()) {
+ handler = stub;
+ } else {
+ handler = isolate()->factory()->NewTuple2(validity_cell, stub);
+ }
+
+ } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ // TODO(mvstanton): Consider embedding store_mode in the state of the slow
+ // keyed store ic for uniformity.
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
+ handler = isolate()->builtins()->KeyedStoreIC_Slow();
+ } else {
+ handler = StoreElementHandler(receiver_map, store_mode);
+ }
+ DCHECK(!handler.is_null());
+ handlers->Add(handler);
+ transitioned_maps->Add(transitioned_map);
+ }
+}
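
For intuition, the pairing this loop builds can be modeled in isolation. The snippet below is illustrative only, with invented types: a receiver map that carries a transition target selects the transition-and-store stub, while one that does not keeps the plain fast-element stub.

    // Simplified, self-contained model (not V8 code) of the handler choice
    // made per receiver map in StoreElementPolymorphicHandlers above.
    #include <cstdio>

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS };

    struct MapModel {
      ElementsKind kind;
      const MapModel* transition_target;  // null if no transition applies
    };

    const char* ChooseHandler(const MapModel& map) {
      return map.transition_target != nullptr
                 ? "ElementsTransitionAndStoreStub"
                 : "StoreFastElementStub";
    }

    int main() {
      MapModel double_map{FAST_DOUBLE_ELEMENTS, nullptr};
      // Storing e.g. 1.5 into a smi-elements array forces smi -> double:
      MapModel smi_map{FAST_SMI_ELEMENTS, &double_map};
      std::printf("%s\n", ChooseHandler(smi_map));     // transition stub
      std::printf("%s\n", ChooseHandler(double_map));  // plain fast stub
      return 0;
    }
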
bool IsOutOfBoundsAccess(Handle<JSObject> receiver, uint32_t index) {
uint32_t length = 0;
@@ -2404,8 +2460,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Object);
if (!is_vector_set()) {
ConfigureVectorState(MEGAMORPHIC, key);
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- "unhandled internalized string key");
+ TRACE_GENERIC_IC("unhandled internalized string key");
TRACE_IC("StoreIC", key);
}
return store_handle;
@@ -2419,7 +2474,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
// the runtime to enable optimization of element hole access.
Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
if (heap_object->map()->IsMapInArrayPrototypeChain()) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "map in array prototype");
+ TRACE_GENERIC_IC("map in array prototype");
use_ic = false;
}
}
@@ -2450,7 +2505,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (use_ic) {
if (!old_receiver_map.is_null()) {
if (is_arguments) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
+ TRACE_GENERIC_IC("arguments receiver");
} else if (key_is_valid_index) {
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
@@ -2459,20 +2514,18 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly()) {
UpdateStoreElement(old_receiver_map, store_mode);
} else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- "dictionary or proxy prototype");
+ TRACE_GENERIC_IC("dictionary or proxy prototype");
}
} else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-smi-like key");
+ TRACE_GENERIC_IC("non-smi-like key");
}
} else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-JSObject receiver");
+ TRACE_GENERIC_IC("non-JSObject receiver");
}
}
if (!is_vector_set()) {
ConfigureVectorState(MEGAMORPHIC, key);
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
}
TRACE_IC("StoreIC", key);
@@ -2480,48 +2533,6 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
-void CallIC::HandleMiss(Handle<Object> function) {
- Handle<Object> name = isolate()->factory()->empty_string();
- CallICNexus* nexus = casted_nexus<CallICNexus>();
- Object* feedback = nexus->GetFeedback();
-
- // Hand-coded MISS handling is easier if CallIC slots don't contain smis.
- DCHECK(!feedback->IsSmi());
-
- if (feedback->IsWeakCell() || !function->IsJSFunction() ||
- feedback->IsAllocationSite()) {
- // We are going generic.
- nexus->ConfigureMegamorphic();
- } else {
- DCHECK(feedback == *FeedbackVector::UninitializedSentinel(isolate()));
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
-
- Handle<JSFunction> array_function =
- Handle<JSFunction>(isolate()->native_context()->array_function());
- if (array_function.is_identical_to(js_function)) {
- // Alter the slot.
- nexus->ConfigureMonomorphicArray();
- } else if (js_function->context()->native_context() !=
- *isolate()->native_context()) {
- // Don't collect cross-native context feedback for the CallIC.
- // TODO(bmeurer): We should collect the SharedFunctionInfo as
- // feedback in this case instead.
- nexus->ConfigureMegamorphic();
- } else {
- nexus->ConfigureMonomorphic(js_function);
- }
- }
-
- if (function->IsJSFunction()) {
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
- name = handle(js_function->shared()->name(), isolate());
- }
-
- OnTypeFeedbackChanged(isolate(), get_host());
- TRACE_IC("CallIC", name);
-}
-
-
#undef TRACE_IC
@@ -2530,22 +2541,6 @@ void CallIC::HandleMiss(Handle<Object> function) {
//
// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- // Runtime functions don't follow the IC's calling convention.
- Handle<Object> function = args.at(0);
- Handle<FeedbackVector> vector = args.at<FeedbackVector>(1);
- Handle<Smi> slot = args.at<Smi>(2);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- CallICNexus nexus(vector, vector_slot);
- CallIC ic(isolate, &nexus);
- ic.HandleMiss(function);
- return *function;
-}
-
-
-// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -2554,28 +2549,28 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
Handle<Name> key = args.at<Name>(1);
Handle<Smi> slot = args.at<Smi>(2);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
- FeedbackVectorSlotKind kind = vector->GetKind(vector_slot);
- if (kind == FeedbackVectorSlotKind::LOAD_IC) {
+ FeedbackSlotKind kind = vector->GetKind(vector_slot);
+ if (IsLoadICKind(kind)) {
LoadICNexus nexus(vector, vector_slot);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ LoadIC ic(isolate, &nexus);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
- } else if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
+ } else if (IsLoadGlobalICKind(kind)) {
DCHECK_EQ(*isolate->global_object(), *receiver);
LoadGlobalICNexus nexus(vector, vector_slot);
- LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ LoadGlobalIC ic(isolate, &nexus);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(key));
} else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, kind);
+ DCHECK(IsKeyedLoadICKind(kind));
KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ KeyedLoadIC ic(isolate, &nexus);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
@@ -2590,10 +2585,10 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
Handle<String> name = args.at<String>(0);
Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
LoadGlobalICNexus nexus(vector, vector_slot);
- LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ LoadGlobalIC ic(isolate, &nexus);
ic.UpdateState(global, name);
Handle<Object> result;
@@ -2603,7 +2598,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
Handle<Context> native_context = isolate->native_context();
@@ -2630,11 +2625,13 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
isolate, result,
Runtime::GetObjectProperty(isolate, global, name, &is_found));
if (!is_found) {
- LoadICNexus nexus(isolate);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ Handle<Smi> slot = args.at<Smi>(1);
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlotKind kind = vector->GetKind(vector_slot);
  // It is actually a LoadGlobalIC here, but the predicate handles this case
  // properly.
- if (ic.ShouldThrowReferenceError()) {
+ if (LoadIC::ShouldThrowReferenceError(kind)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
}
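
The predicate is easy to model in isolation: only a global load outside typeof reports a missing name, since `typeof undeclared` must yield "undefined" rather than throw. The enum and values below are illustrative stand-ins, not V8's FeedbackSlotKind:

    // Minimal model of LoadIC::ShouldThrowReferenceError(kind).
    enum class SlotKindModel {
      kLoadGlobalInsideTypeof,    // illustrative names, not V8's
      kLoadGlobalNotInsideTypeof
    };

    constexpr bool ShouldThrowReferenceErrorModel(SlotKindModel kind) {
      return kind == SlotKindModel::kLoadGlobalNotInsideTypeof;
    }

    static_assert(!ShouldThrowReferenceErrorModel(
                      SlotKindModel::kLoadGlobalInsideTypeof),
                  "typeof of an undeclared global must not throw");
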
@@ -2651,31 +2648,13 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
Handle<Object> key = args.at(1);
Handle<Smi> slot = args.at<Smi>(2);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
-}
-
-
-RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
- HandleScope scope(isolate);
- typedef LoadWithVectorDescriptor Descriptor;
- DCHECK_EQ(Descriptor::kParameterCount, args.length());
- Handle<Object> receiver = args.at(Descriptor::kReceiver);
- Handle<Object> key = args.at(Descriptor::kName);
- Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
- Handle<FeedbackVector> vector =
- args.at<FeedbackVector>(Descriptor::kVector);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ KeyedLoadIC ic(isolate, &nexus);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
-
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
HandleScope scope(isolate);
@@ -2686,23 +2665,22 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> receiver = args.at(3);
Handle<Name> key = args.at<Name>(4);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlotKind kind = vector->GetKind(vector_slot);
+ if (IsStoreICKind(kind) || IsStoreOwnICKind(kind)) {
StoreICNexus nexus(vector, vector_slot);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ StoreIC ic(isolate, &nexus);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
} else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
- vector->GetKind(vector_slot));
+ DCHECK(IsKeyedStoreICKind(kind));
KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ KeyedStoreIC ic(isolate, &nexus);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
}
-
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
HandleScope scope(isolate);
@@ -2713,9 +2691,9 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ KeyedStoreIC ic(isolate, &nexus);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
@@ -2726,13 +2704,12 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
- // slot and vector parameters are not used.
+ Handle<Smi> slot = args.at<Smi>(1);
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> object = args.at(3);
Handle<Object> key = args.at(4);
- LanguageMode language_mode;
- KeyedStoreICNexus nexus(isolate);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- language_mode = ic.language_mode();
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
RETURN_RESULT_OR_FAILURE(
isolate,
Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
@@ -2741,15 +2718,16 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
HandleScope scope(isolate);
+ DCHECK_EQ(6, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> object = args.at(0);
Handle<Object> key = args.at(1);
Handle<Object> value = args.at(2);
Handle<Map> map = args.at<Map>(3);
- LanguageMode language_mode;
- KeyedStoreICNexus nexus(isolate);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- language_mode = ic.language_mode();
+ Handle<Smi> slot = args.at<Smi>(4);
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(5);
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
@@ -2867,15 +2845,13 @@ MaybeHandle<Object> BinaryOpIC::Transition(
JavaScriptFrame::CollectTopFrameForICStats(isolate());
ic_stats->End();
} else if (FLAG_ic_stats) {
- // if (FLAG_trace_ic) {
- OFStream os(stdout);
- os << "[BinaryOpIC" << old_state << " => " << state << " @ "
- << static_cast<void*>(*new_target) << " <- ";
- JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
- if (!allocation_site.is_null()) {
- os << " using allocation site " << static_cast<void*>(*allocation_site);
- }
- os << "]" << std::endl;
+ int line;
+ int column;
+ Address pc = GetAbstractPC(&line, &column);
+ LOG(isolate(),
+ BinaryOpIC(pc, line, column, *new_target, old_state.ToString().c_str(),
+ state.ToString().c_str(),
+ allocation_site.is_null() ? nullptr : *allocation_site));
}
// Patch the inlined smi code as necessary.
@@ -2964,17 +2940,17 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
ic_info.state += Token::Name(op_);
ic_stats->End();
} else if (FLAG_ic_stats) {
- // if (FLAG_trace_ic) {
- PrintF("[CompareIC in ");
- JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
- PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
- CompareICState::GetStateName(old_stub.left()),
- CompareICState::GetStateName(old_stub.right()),
- CompareICState::GetStateName(old_stub.state()),
- CompareICState::GetStateName(new_left),
- CompareICState::GetStateName(new_right),
- CompareICState::GetStateName(state), Token::Name(op_),
- static_cast<void*>(*stub.GetCode()));
+ int line;
+ int column;
+ Address pc = GetAbstractPC(&line, &column);
+ LOG(isolate(),
+ CompareIC(pc, line, column, *stub.GetCode(), Token::Name(op_),
+ CompareICState::GetStateName(old_stub.left()),
+ CompareICState::GetStateName(old_stub.right()),
+ CompareICState::GetStateName(old_stub.state()),
+ CompareICState::GetStateName(new_left),
+ CompareICState::GetStateName(new_right),
+ CompareICState::GetStateName(state)));
}
// Activate inlined smi code.
@@ -3004,9 +2980,36 @@ RUNTIME_FUNCTION(Runtime_Unreachable) {
Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
ToBooleanICStub stub(isolate(), extra_ic_state());
+ ToBooleanHints old_hints = stub.hints();
bool to_boolean_value = stub.UpdateStatus(object);
+ ToBooleanHints new_hints = stub.hints();
Handle<Code> code = stub.GetCode();
set_target(*code);
+
+  // Note: Although a no-op transition is semantically OK, it hints at a bug
+  // somewhere in our state transition machinery, hence the DCHECK below (see
+  // the model after this function).
+ DCHECK_NE(old_hints, new_hints);
+ if (V8_UNLIKELY(FLAG_ic_stats)) {
+ if (FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ auto ic_stats = ICStats::instance();
+ ic_stats->Begin();
+ ICInfo& ic_info = ic_stats->Current();
+ ic_info.type = "ToBooleanIC";
+ ic_info.state = ToString(old_hints);
+ ic_info.state += "=>";
+ ic_info.state += ToString(new_hints);
+ ic_stats->End();
+ } else {
+ int line;
+ int column;
+ Address pc = GetAbstractPC(&line, &column);
+ LOG(isolate(),
+ ToBooleanIC(pc, line, column, *code, ToString(old_hints).c_str(),
+ ToString(new_hints).c_str()));
+ }
+ }
+
return isolate()->factory()->ToBoolean(to_boolean_value);
}
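
The DCHECK_NE above rests on hints accumulating monotonically: a genuine miss always adds at least one hint bit, so the old and new hint sets can never be equal. A toy model with invented hint values:

    // Illustrative only; the bit values do not mirror V8's ToBooleanHints.
    enum HintBits { kSmallInteger = 1 << 0, kHeapNumber = 1 << 1 };

    // Hints only ever grow: the new set is the union of old and observed.
    constexpr int Transition(int old_hints, int seen) {
      return old_hints | seen;
    }

    static_assert(Transition(kSmallInteger, kHeapNumber) != kSmallInteger,
                  "a genuine miss always grows the hint set");
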
@@ -3101,7 +3104,7 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptorOnly) {
*/
RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
HandleScope scope(isolate);
- DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength + 2);
Handle<Name> name =
args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
Handle<Object> receiver =
@@ -3140,11 +3143,13 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
if (it.IsFound()) return *result;
- LoadICNexus nexus(isolate);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- // It could actually be any kind of LoadICs here but the predicate handles
- // all the cases properly.
- if (!ic.ShouldThrowReferenceError()) {
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(4);
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
+  // It could actually be any kind of load IC slot here, but the predicate
+  // handles all the cases properly.
+ if (!LoadIC::ShouldThrowReferenceError(slot_kind)) {
return isolate->heap()->undefined_value();
}
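
The revised DCHECK (kInterceptorArgsLength + 2) matches the layout the handler compilers push in the architecture-specific hunks below: the three classic interceptor arguments followed by the feedback slot and vector. The constants here are illustrative mirrors of that layout, not V8 definitions:

    // Hypothetical mirror of the five-argument interceptor-miss layout.
    enum InterceptorMissArgs {
      kNameArg = 0,    // NamedLoadHandlerCompiler::kInterceptorArgsNameIndex
      kThisArg = 1,    // ...kInterceptorArgsThisIndex
      kHolderArg = 2,  // ...kInterceptorArgsHolderIndex
      kSlotArg = 3,    // feedback slot (Smi), new in this patch
      kVectorArg = 4,  // feedback vector, new in this patch
      kArgCount = 5    // == kInterceptorArgsLength (3) + 2
    };
    static_assert(kArgCount == 5, "interceptor miss now takes five args");
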
@@ -3156,12 +3161,15 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- StoreICNexus nexus(isolate);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- Handle<JSObject> receiver = args.at<JSObject>(0);
- Handle<Name> name = args.at<Name>(1);
- Handle<Object> value = args.at(2);
+ DCHECK_EQ(5, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at(0);
+ Handle<Smi> slot = args.at<Smi>(1);
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+ Handle<JSObject> receiver = args.at<JSObject>(3);
+ Handle<Name> name = args.at<Name>(4);
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
DCHECK(receiver->HasNamedInterceptor());
InterceptorInfo* interceptor = receiver->GetNamedInterceptor();
@@ -3186,7 +3194,7 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
DCHECK_EQ(LookupIterator::INTERCEPTOR, it.state());
it.Next();
- MAYBE_RETURN(Object::SetProperty(&it, value, ic.language_mode(),
+ MAYBE_RETURN(Object::SetProperty(&it, value, language_mode,
JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED),
isolate->heap()->exception());
return *value;
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 74a034d0e7..c9818f5a5b 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -5,6 +5,8 @@
#ifndef V8_IC_H_
#define V8_IC_H_
+#include "src/factory.h"
+#include "src/feedback-vector.h"
#include "src/ic/ic-state.h"
#include "src/macro-assembler.h"
#include "src/messages.h"
@@ -45,16 +47,12 @@ class IC {
// Clear the inline cache to initial state.
static void Clear(Isolate* isolate, Address address, Address constant_pool);
-#ifdef DEBUG
- bool IsLoadStub() const {
- return kind_ == Code::LOAD_IC || kind_ == Code::LOAD_GLOBAL_IC ||
- kind_ == Code::KEYED_LOAD_IC;
+ bool IsAnyLoad() const {
+ return IsLoadIC() || IsLoadGlobalIC() || IsKeyedLoadIC();
}
- bool IsStoreStub() const {
- return kind_ == Code::STORE_IC || kind_ == Code::KEYED_STORE_IC;
+ bool IsAnyStore() const {
+ return IsStoreIC() || IsStoreOwnIC() || IsKeyedStoreIC();
}
- bool IsCallStub() const { return kind_ == Code::CALL_IC; }
-#endif
static inline Handle<Map> GetHandlerCacheHolder(Handle<Map> receiver_map,
bool receiver_is_holder,
@@ -64,15 +62,15 @@ class IC {
Isolate* isolate,
CacheHolderFlag* flag);
- static bool IsCleared(FeedbackNexus* nexus) {
- InlineCacheState state = nexus->StateFromFeedback();
- return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
- }
-
static bool ICUseVector(Code::Kind kind) {
return kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
- kind == Code::KEYED_LOAD_IC || kind == Code::CALL_IC ||
- kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC;
+ kind == Code::KEYED_LOAD_IC || kind == Code::STORE_IC ||
+ kind == Code::KEYED_STORE_IC;
+ }
+ static bool ICUseVector(FeedbackSlotKind kind) {
+ return IsLoadICKind(kind) || IsLoadGlobalICKind(kind) ||
+ IsKeyedLoadICKind(kind) || IsStoreICKind(kind) ||
+ IsStoreOwnICKind(kind) || IsKeyedStoreICKind(kind);
}
// The ICs that don't pass slot and vector through the stack have to
@@ -83,15 +81,20 @@ class IC {
static inline bool IsHandler(Object* object);
+  // Notify the IC system that feedback has changed.
+ static void OnFeedbackChanged(Isolate* isolate, JSFunction* host_function);
+
protected:
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
+
+ void set_slow_stub_reason(const char* reason) { slow_stub_reason_ = reason; }
+
+ Address GetAbstractPC(int* line, int* column) const;
Isolate* isolate() const { return isolate_; }
- // Get the shared function info of the caller.
- SharedFunctionInfo* GetSharedFunctionInfo() const;
- // Get the code object of the caller.
- Code* GetCode() const;
+ // Get the caller function object.
+ JSFunction* GetHostFunction() const;
inline bool AddressIsDeoptimizedCode() const;
inline static bool AddressIsDeoptimizedCode(Isolate* isolate,
@@ -136,8 +139,6 @@ class IC {
Address constant_pool);
static inline void SetTargetAtAddress(Address address, Code* target,
Address constant_pool);
- // As a vector-based IC, type feedback must be updated differently.
- static void OnTypeFeedbackChanged(Isolate* isolate, Code* host);
static void PostPatching(Address address, Code* target, Code* old_target);
void TraceHandlerCacheHitStats(LookupIterator* lookup);
@@ -165,15 +166,18 @@ class IC {
void CopyICToMegamorphicCache(Handle<Name> name);
bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
void PatchCache(Handle<Name> name, Handle<Object> code);
- Code::Kind kind() const { return kind_; }
- bool is_keyed() const {
- return kind_ == Code::KEYED_LOAD_IC || kind_ == Code::KEYED_STORE_IC;
- }
+ FeedbackSlotKind kind() const { return kind_; }
+ bool IsLoadIC() const { return IsLoadICKind(kind_); }
+ bool IsLoadGlobalIC() const { return IsLoadGlobalICKind(kind_); }
+ bool IsKeyedLoadIC() const { return IsKeyedLoadICKind(kind_); }
+ bool IsStoreIC() const { return IsStoreICKind(kind_); }
+ bool IsStoreOwnIC() const { return IsStoreOwnICKind(kind_); }
+ bool IsKeyedStoreIC() const { return IsKeyedStoreICKind(kind_); }
+ bool is_keyed() const { return IsKeyedLoadIC() || IsKeyedStoreIC(); }
Code::Kind handler_kind() const {
- if (kind_ == Code::KEYED_LOAD_IC) return Code::LOAD_IC;
- DCHECK(kind_ == Code::LOAD_IC || kind_ == Code::STORE_IC ||
- kind_ == Code::KEYED_STORE_IC);
- return kind_;
+ if (IsAnyLoad()) return Code::LOAD_IC;
+ DCHECK(IsAnyStore());
+ return Code::STORE_IC;
}
bool ShouldRecomputeHandler(Handle<String> name);
@@ -201,7 +205,7 @@ class IC {
}
Handle<FeedbackVector> vector() const { return nexus()->vector_handle(); }
- FeedbackVectorSlot slot() const { return nexus()->slot(); }
+ FeedbackSlot slot() const { return nexus()->slot(); }
State saved_state() const {
return state() == RECOMPUTE_HANDLER ? old_state_ : state();
}
@@ -212,7 +216,6 @@ class IC {
}
FeedbackNexus* nexus() const { return nexus_; }
- inline Code* get_host();
inline Code* target() const;
private:
@@ -244,7 +247,7 @@ class IC {
bool vector_set_;
State old_state_; // For saving if we marked as prototype failure.
State state_;
- Code::Kind kind_;
+ FeedbackSlotKind kind_;
Handle<Map> receiver_map_;
MaybeHandle<Object> maybe_handler_;
@@ -252,6 +255,8 @@ class IC {
MapHandleList target_maps_;
bool target_maps_set_;
+ const char* slow_stub_reason_;
+
FeedbackNexus* nexus_;
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
@@ -264,32 +269,28 @@ class CallIC : public IC {
: IC(EXTRA_CALL_FRAME, isolate, nexus) {
DCHECK(nexus != NULL);
}
-
- void HandleMiss(Handle<Object> function);
-
- static void Clear(Isolate* isolate, Code* host, CallICNexus* nexus);
};
class LoadIC : public IC {
public:
- LoadIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
- : IC(depth, isolate, nexus) {
+ LoadIC(Isolate* isolate, FeedbackNexus* nexus)
+ : IC(NO_EXTRA_FRAME, isolate, nexus) {
DCHECK(nexus != NULL);
- DCHECK(IsLoadStub());
+ DCHECK(IsAnyLoad());
+ }
+
+ static bool ShouldThrowReferenceError(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kLoadGlobalNotInsideTypeof;
}
bool ShouldThrowReferenceError() const {
- return kind() == Code::LOAD_GLOBAL_IC &&
- LoadGlobalICState::GetTypeofMode(extra_ic_state()) ==
- NOT_INSIDE_TYPEOF;
+ return ShouldThrowReferenceError(kind());
}
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Name> name);
- static void Clear(Isolate* isolate, Code* host, LoadICNexus* nexus);
-
protected:
virtual Handle<Code> slow_stub() const {
return isolate()->builtins()->LoadIC_Slow();
@@ -324,13 +325,11 @@ class LoadIC : public IC {
class LoadGlobalIC : public LoadIC {
public:
- LoadGlobalIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
- : LoadIC(depth, isolate, nexus) {}
+ LoadGlobalIC(Isolate* isolate, FeedbackNexus* nexus)
+ : LoadIC(isolate, nexus) {}
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Name> name);
- static void Clear(Isolate* isolate, Code* host, LoadGlobalICNexus* nexus);
-
protected:
Handle<Code> slow_stub() const override {
return isolate()->builtins()->LoadGlobalIC_Slow();
@@ -339,17 +338,14 @@ class LoadGlobalIC : public LoadIC {
class KeyedLoadIC : public LoadIC {
public:
- KeyedLoadIC(FrameDepth depth, Isolate* isolate,
- KeyedLoadICNexus* nexus = NULL)
- : LoadIC(depth, isolate, nexus) {
+ KeyedLoadIC(Isolate* isolate, KeyedLoadICNexus* nexus)
+ : LoadIC(isolate, nexus) {
DCHECK(nexus != NULL);
}
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Object> key);
- static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
-
protected:
// receiver is HeapObject because it could be a String or a JSObject
void UpdateLoadElement(Handle<HeapObject> receiver);
@@ -361,13 +357,13 @@ class KeyedLoadIC : public LoadIC {
class StoreIC : public IC {
public:
- StoreIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
- : IC(depth, isolate, nexus) {
- DCHECK(IsStoreStub());
+ StoreIC(Isolate* isolate, FeedbackNexus* nexus)
+ : IC(NO_EXTRA_FRAME, isolate, nexus) {
+ DCHECK(IsAnyStore());
}
LanguageMode language_mode() const {
- return StoreICState::GetLanguageMode(extra_ic_state());
+ return nexus()->vector()->GetLanguageMode(nexus()->slot());
}
MUST_USE_RESULT MaybeHandle<Object> Store(
@@ -378,20 +374,11 @@ class StoreIC : public IC {
bool LookupForWrite(LookupIterator* it, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode);
- static void Clear(Isolate* isolate, Code* host, StoreICNexus* nexus);
-
protected:
// Stub accessors.
Handle<Code> slow_stub() const {
- switch (language_mode()) {
- case SLOPPY:
- return isolate()->builtins()->StoreIC_SlowSloppy();
- case STRICT:
- return isolate()->builtins()->StoreIC_SlowStrict();
- default:
- UNREACHABLE();
- return Handle<Code>();
- }
+ // StoreIC and KeyedStoreIC share the same slow stub.
+ return isolate()->builtins()->KeyedStoreIC_Slow();
}
// Update the inline cache and the global stub cache based on the
@@ -423,22 +410,13 @@ class KeyedStoreIC : public StoreIC {
return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
}
- KeyedStoreIC(FrameDepth depth, Isolate* isolate,
- KeyedStoreICNexus* nexus = NULL)
- : StoreIC(depth, isolate, nexus) {}
+ KeyedStoreIC(Isolate* isolate, KeyedStoreICNexus* nexus)
+ : StoreIC(isolate, nexus) {}
MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
Handle<Object> name,
Handle<Object> value);
- // Code generators for stub routines. Only called once at startup.
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateSlow(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode);
-
- static void Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus);
-
protected:
void UpdateStoreElement(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode);
@@ -447,6 +425,14 @@ class KeyedStoreIC : public StoreIC {
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
KeyedAccessStoreMode store_mode);
+ Handle<Object> StoreElementHandler(Handle<Map> receiver_map,
+ KeyedAccessStoreMode store_mode);
+
+ void StoreElementPolymorphicHandlers(MapHandleList* receiver_maps,
+ MapHandleList* transitioned_maps,
+ List<Handle<Object>>* handlers,
+ KeyedAccessStoreMode store_mode);
+
friend class IC;
};
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 81ce3a6a1f..8962386c93 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -7,19 +7,20 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/contexts.h"
-#include "src/ic/accessor-assembler-impl.h"
+#include "src/ic/accessor-assembler.h"
#include "src/interface-descriptors.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
using compiler::Node;
-class KeyedStoreGenericAssembler : public AccessorAssemblerImpl {
+class KeyedStoreGenericAssembler : public AccessorAssembler {
public:
explicit KeyedStoreGenericAssembler(compiler::CodeAssemblerState* state)
- : AccessorAssemblerImpl(state) {}
+ : AccessorAssembler(state) {}
void KeyedStoreGeneric(LanguageMode language_mode);
@@ -72,6 +73,13 @@ class KeyedStoreGenericAssembler : public AccessorAssemblerImpl {
Variable* var_accessor_pair,
Variable* var_accessor_holder,
Label* readonly, Label* bailout);
+
+ void CheckFieldType(Node* descriptors, Node* name_index, Node* representation,
+ Node* value, Label* bailout);
+ void OverwriteExistingFastProperty(Node* object, Node* object_map,
+ Node* properties, Node* descriptors,
+ Node* descriptor_name_index, Node* details,
+ Node* value, Label* slow);
};
void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state,
@@ -266,7 +274,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// can always be stored.
{
Label non_smi_value(this);
- GotoUnless(TaggedIsSmi(value), &non_smi_value);
+ GotoIfNot(TaggedIsSmi(value), &non_smi_value);
// If we're about to introduce holes, ensure holey elements.
if (update_length == kBumpLengthWithGap) {
TryChangeToHoleyMapMulti(receiver, receiver_map, elements_kind, context,
@@ -461,6 +469,8 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
// Out-of-capacity accesses (index >= capacity) jump here. Additionally,
// an ElementsKind transition might be necessary.
+ // The index can also be negative at this point! Jump to the runtime in that
+ // case to convert it to a named property.
Bind(&if_grow);
{
Comment("Grow backing store");
@@ -537,16 +547,12 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
{
Node* descriptors = var_meta_storage.value();
Node* name_index = var_entry.value();
- // TODO(jkummerow): Add helper functions for accessing value and
- // details by entry.
- const int kNameToDetailsOffset = (DescriptorArray::kDescriptorDetails -
- DescriptorArray::kDescriptorKey) *
- kPointerSize;
- Node* details = LoadAndUntagToWord32FixedArrayElement(
- descriptors, name_index, kNameToDetailsOffset);
+ Node* details =
+ LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
JumpIfDataProperty(details, &ok_to_write, readonly);
// Accessor case.
+ // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
Variable var_details(this, MachineRepresentation::kWord32);
LoadPropertyFromFastObject(holder, holder_map, descriptors, name_index,
&var_details, var_accessor_pair);
@@ -558,19 +564,13 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
{
Node* dictionary = var_meta_storage.value();
Node* entry = var_entry.value();
- const int kNameToDetailsOffset = (NameDictionary::kEntryDetailsIndex -
- NameDictionary::kEntryKeyIndex) *
- kPointerSize;
- Node* details = LoadAndUntagToWord32FixedArrayElement(
- dictionary, entry, kNameToDetailsOffset);
+ Node* details =
+ LoadDetailsByKeyIndex<NameDictionary>(dictionary, entry);
JumpIfDataProperty(details, &ok_to_write, readonly);
// Accessor case.
- const int kNameToValueOffset = (NameDictionary::kEntryValueIndex -
- NameDictionary::kEntryKeyIndex) *
- kPointerSize;
var_accessor_pair->Bind(
- LoadFixedArrayElement(dictionary, entry, kNameToValueOffset));
+ LoadValueByKeyIndex<NameDictionary>(dictionary, entry));
var_accessor_holder->Bind(holder);
Goto(accessor);
}
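
The LoadDetailsByKeyIndex/LoadValueByKeyIndex helpers used above encapsulate the offset arithmetic the deleted lines spelled out by hand. With illustrative entry-layout constants (not taken verbatim from V8's headers), the computation reduces to:

    // Example: if an entry stores its key at index 0 and its details at
    // index 2, the details word sits two pointers past the key.
    constexpr int kEntryKeyIndex = 0;      // illustrative layout
    constexpr int kEntryDetailsIndex = 2;  // illustrative layout
    constexpr int kPointerSizeBytes = sizeof(void*);
    constexpr int kNameToDetailsOffset =
        (kEntryDetailsIndex - kEntryKeyIndex) * kPointerSizeBytes;
    static_assert(kNameToDetailsOffset == 2 * kPointerSizeBytes,
                  "details live two words after the key");
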
@@ -579,13 +579,8 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
{
Node* dictionary = var_meta_storage.value();
Node* entry = var_entry.value();
- const int kNameToValueOffset = (GlobalDictionary::kEntryValueIndex -
- GlobalDictionary::kEntryKeyIndex) *
- kPointerSize;
-
Node* property_cell =
- LoadFixedArrayElement(dictionary, entry, kNameToValueOffset);
-
+ LoadValueByKeyIndex<GlobalDictionary>(dictionary, entry);
Node* value =
LoadObjectField(property_cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(value, TheHoleConstant()), &next_proto);
@@ -613,6 +608,146 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
Bind(&ok_to_write);
}
+void KeyedStoreGenericAssembler::CheckFieldType(Node* descriptors,
+ Node* name_index,
+ Node* representation,
+ Node* value, Label* bailout) {
+ Label r_smi(this), r_double(this), r_heapobject(this), all_fine(this);
+ // Ignore FLAG_track_fields etc. and always emit code for all checks,
+ // because this builtin is part of the snapshot and therefore should
+ // be flag independent.
+ GotoIf(Word32Equal(representation, Int32Constant(Representation::kSmi)),
+ &r_smi);
+ GotoIf(Word32Equal(representation, Int32Constant(Representation::kDouble)),
+ &r_double);
+ GotoIf(
+ Word32Equal(representation, Int32Constant(Representation::kHeapObject)),
+ &r_heapobject);
+ GotoIf(Word32Equal(representation, Int32Constant(Representation::kNone)),
+ bailout);
+ CSA_ASSERT(this, Word32Equal(representation,
+ Int32Constant(Representation::kTagged)));
+ Goto(&all_fine);
+
+ Bind(&r_smi);
+ { Branch(TaggedIsSmi(value), &all_fine, bailout); }
+
+ Bind(&r_double);
+ {
+ GotoIf(TaggedIsSmi(value), &all_fine);
+ Node* value_map = LoadMap(value);
+ // While supporting mutable HeapNumbers would be straightforward, such
+ // objects should not end up here anyway.
+ CSA_ASSERT(this,
+ WordNotEqual(value_map,
+ LoadRoot(Heap::kMutableHeapNumberMapRootIndex)));
+ Branch(IsHeapNumberMap(value_map), &all_fine, bailout);
+ }
+
+ Bind(&r_heapobject);
+ {
+ GotoIf(TaggedIsSmi(value), bailout);
+ Node* field_type =
+ LoadValueByKeyIndex<DescriptorArray>(descriptors, name_index);
+ intptr_t kNoneType = reinterpret_cast<intptr_t>(FieldType::None());
+ intptr_t kAnyType = reinterpret_cast<intptr_t>(FieldType::Any());
+ // FieldType::None can't hold any value.
+ GotoIf(WordEqual(field_type, IntPtrConstant(kNoneType)), bailout);
+ // FieldType::Any can hold any value.
+ GotoIf(WordEqual(field_type, IntPtrConstant(kAnyType)), &all_fine);
+ CSA_ASSERT(this, IsWeakCell(field_type));
+ // Cleared WeakCells count as FieldType::None, which can't hold any value.
+ field_type = LoadWeakCellValue(field_type, bailout);
+ // FieldType::Class(...) performs a map check.
+ CSA_ASSERT(this, IsMap(field_type));
+ Branch(WordEqual(LoadMap(value), field_type), &all_fine, bailout);
+ }
+
+ Bind(&all_fine);
+}
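
CheckFieldType's dispatch admits a different value set per representation: Smi fields take only Smis, double fields take Smis or HeapNumbers, heap-object fields reject Smis and then consult the field type, and tagged fields take anything. A compact stand-alone model, not V8's Representation machinery:

    // Illustrative model of the per-representation admission rules above.
    enum class RepModel { kSmi, kDouble, kHeapObject, kTagged };

    // Returns true when a value of the given dynamic shape may be stored.
    constexpr bool Admits(RepModel rep, bool is_smi, bool is_heap_number) {
      switch (rep) {
        case RepModel::kSmi:        return is_smi;
        case RepModel::kDouble:     return is_smi || is_heap_number;
        case RepModel::kHeapObject: return !is_smi;  // plus field-type check
        case RepModel::kTagged:     return true;
      }
      return false;
    }

    static_assert(Admits(RepModel::kDouble, /*is_smi=*/true,
                         /*is_heap_number=*/false),
                  "a Smi may always widen into a double field");
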
+
+void KeyedStoreGenericAssembler::OverwriteExistingFastProperty(
+ Node* object, Node* object_map, Node* properties, Node* descriptors,
+ Node* descriptor_name_index, Node* details, Node* value, Label* slow) {
+ // Properties in descriptors can't be overwritten without map transition.
+ GotoIf(Word32NotEqual(DecodeWord32<PropertyDetails::LocationField>(details),
+ Int32Constant(kField)),
+ slow);
+
+ if (FLAG_track_constant_fields) {
+ // TODO(ishell): Taking the slow path is not necessary if new and old
+ // values are identical.
+ GotoIf(Word32Equal(DecodeWord32<PropertyDetails::ConstnessField>(details),
+ Int32Constant(kConst)),
+ slow);
+ }
+
+ Label done(this);
+ Node* representation =
+ DecodeWord32<PropertyDetails::RepresentationField>(details);
+
+ CheckFieldType(descriptors, descriptor_name_index, representation, value,
+ slow);
+ Node* field_index =
+ DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
+ Node* inobject_properties = LoadMapInobjectProperties(object_map);
+
+ Label inobject(this), backing_store(this);
+ Branch(UintPtrLessThan(field_index, inobject_properties), &inobject,
+ &backing_store);
+
+ Bind(&inobject);
+ {
+ Node* field_offset =
+ IntPtrMul(IntPtrSub(LoadMapInstanceSize(object_map),
+ IntPtrSub(inobject_properties, field_index)),
+ IntPtrConstant(kPointerSize));
+ Label tagged_rep(this), double_rep(this);
+ Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
+ &double_rep, &tagged_rep);
+ Bind(&double_rep);
+ {
+ Node* double_value = ChangeNumberToFloat64(value);
+ if (FLAG_unbox_double_fields) {
+ StoreObjectFieldNoWriteBarrier(object, field_offset, double_value,
+ MachineRepresentation::kFloat64);
+ } else {
+ Node* mutable_heap_number = LoadObjectField(object, field_offset);
+ StoreHeapNumberValue(mutable_heap_number, double_value);
+ }
+ Goto(&done);
+ }
+
+ Bind(&tagged_rep);
+ {
+ StoreObjectField(object, field_offset, value);
+ Goto(&done);
+ }
+ }
+
+ Bind(&backing_store);
+ {
+ Node* backing_store_index = IntPtrSub(field_index, inobject_properties);
+ Label tagged_rep(this), double_rep(this);
+ Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
+ &double_rep, &tagged_rep);
+ Bind(&double_rep);
+ {
+ Node* double_value = ChangeNumberToFloat64(value);
+ Node* mutable_heap_number =
+ LoadFixedArrayElement(properties, backing_store_index);
+ StoreHeapNumberValue(mutable_heap_number, double_value);
+ Goto(&done);
+ }
+ Bind(&tagged_rep);
+ {
+ StoreFixedArrayElement(properties, backing_store_index, value);
+ Goto(&done);
+ }
+ }
+ Bind(&done);
+}
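
The in-object branch above computes the field's byte offset as (instance size minus the number of in-object slots that follow the field) times the pointer size. A worked example with made-up numbers:

    // An object with an instance size of 10 words and 4 in-object properties
    // stores field 1 at word 7, i.e. 10 - (4 - 1). Numbers are illustrative.
    #include <cstddef>

    constexpr std::ptrdiff_t kWordSize = sizeof(void*);

    constexpr std::ptrdiff_t InObjectFieldOffset(
        std::ptrdiff_t instance_size_words, std::ptrdiff_t inobject_properties,
        std::ptrdiff_t field_index) {
      return (instance_size_words - (inobject_properties - field_index)) *
             kWordSize;
    }

    static_assert(InObjectFieldOffset(10, 4, 1) == 7 * kWordSize,
                  "field 1 of 4 lives 3 words before the end of the object");
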
+
void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Node* receiver, Node* receiver_map, const StoreICParameters* p, Label* slow,
LanguageMode language_mode) {
@@ -627,10 +762,40 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Bind(&fast_properties);
{
- // TODO(jkummerow): Does it make sense to support some cases here inline?
- // Maybe overwrite existing writable properties?
- // Maybe support map transitions?
- Goto(&stub_cache);
+ Comment("fast property store");
+ Node* bitfield3 = LoadMapBitField3(receiver_map);
+ Node* descriptors = LoadMapDescriptors(receiver_map);
+ Label descriptor_found(this);
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ // TODO(jkummerow): Maybe look for existing map transitions?
+ Label* notfound = &stub_cache;
+ DescriptorLookup(p->name, descriptors, bitfield3, &descriptor_found,
+ &var_name_index, notfound);
+
+ Bind(&descriptor_found);
+ {
+ Node* name_index = var_name_index.value();
+ Node* details =
+ LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
+ Label data_property(this);
+ JumpIfDataProperty(details, &data_property, &readonly);
+
+ // Accessor case.
+ // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
+ Variable var_details(this, MachineRepresentation::kWord32);
+ LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+ name_index, &var_details, &var_accessor_pair);
+ var_accessor_holder.Bind(receiver);
+ Goto(&accessor);
+
+ Bind(&data_property);
+ {
+ OverwriteExistingFastProperty(receiver, receiver_map, properties,
+ descriptors, name_index, details,
+ p->value, slow);
+ Return(p->value);
+ }
+ }
}
Bind(&dictionary_properties);
@@ -646,26 +811,20 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Bind(&dictionary_found);
{
Label overwrite(this);
- const int kNameToDetailsOffset = (NameDictionary::kEntryDetailsIndex -
- NameDictionary::kEntryKeyIndex) *
- kPointerSize;
- Node* details = LoadAndUntagToWord32FixedArrayElement(
- properties, var_name_index.value(), kNameToDetailsOffset);
+ Node* details = LoadDetailsByKeyIndex<NameDictionary>(
+ properties, var_name_index.value());
JumpIfDataProperty(details, &overwrite, &readonly);
// Accessor case.
- const int kNameToValueOffset =
- (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
- kPointerSize;
- var_accessor_pair.Bind(LoadFixedArrayElement(
- properties, var_name_index.value(), kNameToValueOffset));
+ var_accessor_pair.Bind(LoadValueByKeyIndex<NameDictionary>(
+ properties, var_name_index.value()));
var_accessor_holder.Bind(receiver);
Goto(&accessor);
Bind(&overwrite);
{
- StoreFixedArrayElement(properties, var_name_index.value(), p->value,
- UPDATE_WRITE_BARRIER, kNameToValueOffset);
+ StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
+ p->value);
Return(p->value);
}
}
@@ -690,7 +849,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Node* setter_map = LoadMap(setter);
// FunctionTemplateInfo setters are not supported yet.
GotoIf(IsFunctionTemplateInfoMap(setter_map), slow);
- GotoUnless(IsCallableMap(setter_map), &not_callable);
+ GotoIfNot(IsCallableMap(setter_map), &not_callable);
Callable callable = CodeFactory::Call(isolate());
CallJS(callable, p->context, setter, receiver, p->value);
@@ -734,7 +893,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Bind(&found_handler);
{
Comment("KeyedStoreGeneric found handler");
- HandleStoreICHandlerCase(p, var_handler.value(), slow);
+ HandleStoreICHandlerCase(p, var_handler.value(), &stub_cache_miss);
}
Bind(&stub_cache_miss);
{
@@ -756,6 +915,8 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(LanguageMode language_mode) {
Node* context = Parameter(Descriptor::kContext);
Variable var_index(this, MachineType::PointerRepresentation());
+ Variable var_unique(this, MachineRepresentation::kTagged);
+ var_unique.Bind(name); // Dummy initialization.
Label if_index(this), if_unique_name(this), slow(this);
GotoIf(TaggedIsSmi(receiver), &slow);
@@ -767,7 +928,7 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(LanguageMode language_mode) {
Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
&slow);
- TryToName(name, &if_index, &var_index, &if_unique_name, &slow);
+ TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique, &slow);
Bind(&if_index);
{
@@ -779,8 +940,8 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(LanguageMode language_mode) {
Bind(&if_unique_name);
{
Comment("key is unique name");
- KeyedStoreGenericAssembler::StoreICParameters p(context, receiver, name,
- value, slot, vector);
+ StoreICParameters p(context, receiver, var_unique.value(), value, slot,
+ vector);
EmitGenericPropertyStore(receiver, receiver_map, &p, &slow, language_mode);
}
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 43588b707a..c14652cf47 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -173,15 +173,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, scratch1);
-}
-
-
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
@@ -199,24 +190,18 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
__ Branch(miss, ne, scratch, Operand(at));
}
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
__ Push(name, receiver, holder);
-}
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, Runtime::FunctionId id) {
- DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
- Runtime::FunctionForId(id)->nargs);
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallRuntime(id);
}
@@ -512,8 +497,18 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name(), receiver(), holder_reg);
+ // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+ if (holder_reg.is(receiver())) {
+ __ Push(slot(), vector());
+ } else {
+ __ Push(scratch3(), scratch2()); // slot, vector
+ }
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index e31aab1d76..fd39972f0e 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -6,45 +6,12 @@
#include "src/codegen.h"
#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreWithVectorDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister(),
- StoreWithVectorDescriptor::ReceiverRegister(),
- StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
-
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
@@ -102,9 +69,7 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
- static_cast<void*>(address),
- static_cast<void*>(andi_instruction_address), delta);
+ LOG(isolate, PatchIC(address, andi_instruction_address, delta));
}
Address patch_address =
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 06af88d19e..1a38d329e7 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -173,15 +173,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, scratch1);
-}
-
-
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
@@ -199,24 +190,18 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
__ Branch(miss, ne, scratch, Operand(at));
}
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
__ Push(name, receiver, holder);
-}
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, Runtime::FunctionId id) {
- DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
- Runtime::FunctionForId(id)->nargs);
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallRuntime(id);
}
@@ -512,8 +497,18 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name(), receiver(), holder_reg);
+ // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+ if (holder_reg.is(receiver())) {
+ __ Push(slot(), vector());
+ } else {
+ __ Push(scratch3(), scratch2()); // slot, vector
+ }
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index fa351ba5a3..0e2032a41d 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -6,45 +6,12 @@
#include "src/codegen.h"
#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreWithVectorDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister(),
- StoreWithVectorDescriptor::ReceiverRegister(),
- StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
-
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
@@ -102,9 +69,7 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
- static_cast<void*>(address),
- static_cast<void*>(andi_instruction_address), delta);
+ LOG(isolate, PatchIC(address, andi_instruction_address, delta));
}
Address patch_address =
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index d4edcc1ec9..3da558d10e 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -176,15 +176,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mr(r3, scratch1);
- __ Ret();
-}
-
-
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
@@ -204,25 +195,18 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
}
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
- __ push(name);
- __ push(receiver);
- __ push(holder);
-}
-
-
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
Runtime::FunctionForId(id)->nargs);
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name, receiver, holder);
+
__ CallRuntime(id);
}
@@ -530,8 +514,18 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name(), receiver(), holder_reg);
+ // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+ if (holder_reg.is(receiver())) {
+ __ Push(slot(), vector());
+ } else {
+ __ Push(scratch3(), scratch2()); // slot, vector
+ }
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 3c325d8f92..0f25846870 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -6,45 +6,12 @@
#include "src/codegen.h"
#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreWithVectorDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister(),
- StoreWithVectorDescriptor::ReceiverRegister(),
- StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
-
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
@@ -103,9 +70,7 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
- static_cast<void*>(address),
- static_cast<void*>(cmp_instruction_address), delta);
+ LOG(isolate, PatchIC(address, cmp_instruction_address, delta));
}
Address patch_address =
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
index 40a8c310d8..9f087977a1 100644
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -169,14 +169,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ LoadRR(r2, scratch1);
- __ Ret();
-}
-
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
@@ -194,24 +186,18 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
__ bne(miss);
}
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
- __ Push(name);
- __ Push(receiver);
- __ Push(holder);
-}
-
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
Runtime::FunctionForId(id)->nargs);
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name, receiver, holder);
+
__ CallRuntime(id);
}
@@ -508,8 +494,18 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name(), receiver(), holder_reg);
+ // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+ if (holder_reg.is(receiver())) {
+ __ Push(slot(), vector());
+ } else {
+ __ Push(scratch3(), scratch2()); // slot, vector
+ }
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
diff --git a/deps/v8/src/ic/s390/ic-s390.cc b/deps/v8/src/ic/s390/ic-s390.cc
index 6438cfca47..494a4cd1d7 100644
--- a/deps/v8/src/ic/s390/ic-s390.cc
+++ b/deps/v8/src/ic/s390/ic-s390.cc
@@ -6,41 +6,11 @@
#include "src/ic/ic.h"
#include "src/codegen.h"
-#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreWithVectorDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister(),
- StoreWithVectorDescriptor::ReceiverRegister(),
- StoreWithVectorDescriptor::NameRegister());
-}
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
@@ -100,9 +70,7 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
- static_cast<void*>(address),
- static_cast<void*>(cmp_instruction_address), delta);
+ LOG(isolate, PatchIC(address, cmp_instruction_address, delta));
}
// Expected sequence to enable by changing the following
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 84dbf48436..5fc8cc318d 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -6,6 +6,8 @@
#include "src/ast/ast.h"
#include "src/base/bits.h"
+#include "src/counters.h"
+#include "src/heap/heap.h"
#include "src/ic/ic-inl.h"
#include "src/type-info.h"
@@ -99,12 +101,12 @@ void StubCache::Clear() {
Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
for (int i = 0; i < kPrimaryTableSize; i++) {
primary_[i].key = isolate()->heap()->empty_string();
- primary_[i].map = NULL;
+ primary_[i].map = nullptr;
primary_[i].value = empty;
}
for (int j = 0; j < kSecondaryTableSize; j++) {
secondary_[j].key = isolate()->heap()->empty_string();
- secondary_[j].map = NULL;
+ secondary_[j].map = nullptr;
secondary_[j].value = empty;
}
}
@@ -116,9 +118,9 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
for (int i = 0; i < kPrimaryTableSize; i++) {
if (primary_[i].key == *name) {
Map* map = primary_[i].map;
- // Map can be NULL, if the stub is constant function call
+ // Map can be nullptr if the stub is a constant function call
// with a primitive receiver.
- if (map == NULL) continue;
+ if (map == nullptr) continue;
int offset = PrimaryOffset(*name, map);
if (entry(primary_, offset) == &primary_[i] &&
@@ -131,9 +133,9 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
for (int i = 0; i < kSecondaryTableSize; i++) {
if (secondary_[i].key == *name) {
Map* map = secondary_[i].map;
- // Map can be NULL, if the stub is constant function call
+ // Map can be nullptr if the stub is a constant function call
// with a primitive receiver.
- if (map == NULL) continue;
+ if (map == nullptr) continue;
// Lookup in primary table and skip duplicates.
int primary_offset = PrimaryOffset(*name, map);
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index e8df26d37b..4054b329d3 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -74,7 +74,7 @@ class StubCache {
return StubCache::secondary_;
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
Isolate* isolate() { return isolate_; }
@@ -92,7 +92,7 @@ class StubCache {
// Some magic number used in primary and secondary hash computations.
static const int kPrimaryMagic = 0x3d532433;
- static const int kSecondaryMagic = 0xb16b00b5;
+ static const int kSecondaryMagic = 0xb16ca6e5;
static int PrimaryOffsetForTesting(Name* name, Map* map) {
return PrimaryOffset(name, map);
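
Note: only the secondary magic constant changes here; the offset computation itself is untouched. As an illustrative sketch of how such a two-way cache typically derives its slots (simplified, not V8's exact hashing; the table size is an assumed power of two):

#include <cstdint>

constexpr uint32_t kPrimaryMagicSketch = 0x3d532433;    // as in the header above
constexpr uint32_t kSecondaryMagicSketch = 0xb16ca6e5;  // new value from this patch
constexpr uint32_t kTableSizeSketch = 2048;             // assumption: power of two

// A primary probe mixes the name hash with the map bits; on a miss, a
// secondary probe with a different constant spreads evictions elsewhere.
inline int PrimaryOffsetSketch(uint32_t name_hash, uint32_t map_bits) {
  return static_cast<int>(((name_hash + map_bits) ^ kPrimaryMagicSketch) &
                          (kTableSizeSketch - 1));
}
inline int SecondaryOffsetSketch(uint32_t name_hash, uint32_t primary) {
  return static_cast<int>(((primary - name_hash) + kSecondaryMagicSketch) &
                          (kTableSizeSketch - 1));
}
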
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
index 9e95b9506c..4bbbba5b5a 100644
--- a/deps/v8/src/ic/x64/access-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/access-compiler-x64.cc
@@ -5,6 +5,7 @@
#if V8_TARGET_ARCH_X64
#include "src/ic/access-compiler.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index a89afa8a7e..425ed4762e 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -83,18 +83,12 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register result, Register scratch,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, result, miss_label);
- if (!result.is(rax)) __ movp(rax, result);
- __ ret(0);
-}
-
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -102,15 +96,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
__ Push(name);
__ Push(receiver);
__ Push(holder);
-}
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, Runtime::FunctionId id) {
- DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
- Runtime::FunctionForId(id)->nargs);
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallRuntime(id);
}
@@ -524,10 +510,26 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
- __ PopReturnAddressTo(scratch2());
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
- __ PushReturnAddressFrom(scratch2());
+
+ // Stack:
+ // return address
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(receiver());
+ __ Push(holder_reg);
+ // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+ if (holder_reg.is(receiver())) {
+ __ Push(slot());
+ __ Push(vector());
+ } else {
+ __ Push(scratch3()); // slot
+ __ Push(scratch2()); // vector
+ }
+ __ Push(Operand(rsp, 4 * kPointerSize)); // return address
+ __ movp(Operand(rsp, 5 * kPointerSize), name());
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
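
Note: unlike the ppc/s390 ports, the x64 sequence above cannot simply push below the return address, so it copies the return address down and overwrites its old slot with the name (the x87 version below does the same with esp). Reading the code, the resulting layout for the tail call should be (kPointerSize == 8 on x64):

// Stack after the sequence above, lowest address first:
//   rsp + 0  : return address (copy pushed from rsp + 4 * kPointerSize)
//   rsp + 8  : vector
//   rsp + 16 : slot
//   rsp + 24 : holder
//   rsp + 32 : receiver
//   rsp + 40 : name (movp overwrote the old return-address slot)
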
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 587ebd3daa..3b87bc9b6a 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -6,54 +6,12 @@
#include "src/codegen.h"
#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
- Register name = StoreWithVectorDescriptor::NameRegister();
- Register value = StoreWithVectorDescriptor::ValueRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register temp = r11;
- DCHECK(!AreAliased(receiver, name, value, slot, vector, temp));
-
- __ PopReturnAddressTo(temp);
- __ Push(value);
- __ Push(slot);
- __ Push(vector);
- __ Push(receiver);
- __ Push(name);
- __ PushReturnAddressFrom(temp);
-}
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
@@ -104,9 +62,7 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- static_cast<void*>(address),
- static_cast<void*>(test_instruction_address), delta);
+ LOG(isolate, PatchIC(address, test_instruction_address, delta));
}
// Patch with a short conditional jump. Enabling means switching from a short
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index 4a521b76d3..5a61eee163 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -122,15 +122,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- // TODO(mvstanton): This isn't used on ia32. Move all the other
- // platform implementations into a code stub so this method can be removed.
- UNREACHABLE();
-}
-
-
// Generate call to api function.
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that should be removed
@@ -302,10 +293,12 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
}
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -313,15 +306,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
__ push(name);
__ push(receiver);
__ push(holder);
-}
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, Runtime::FunctionId id) {
- DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
- Runtime::FunctionForId(id)->nargs);
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallRuntime(id);
}
@@ -538,10 +523,26 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Call the runtime system to load the interceptor.
- __ pop(scratch2()); // save old return address
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
- __ push(scratch2()); // restore old return address
+
+ // Stack:
+ // return address
+
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ push(receiver());
+ __ push(holder_reg);
+ // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+ if (holder_reg.is(receiver())) {
+ __ push(slot());
+ __ push(vector());
+ } else {
+ __ push(scratch3()); // slot
+ __ push(scratch2()); // vector
+ }
+ __ push(Operand(esp, 4 * kPointerSize)); // return address
+ __ mov(Operand(esp, 5 * kPointerSize), name());
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index 049a85e92e..7564c006b8 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -6,54 +6,11 @@
#include "src/codegen.h"
#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
- Register name = StoreWithVectorDescriptor::NameRegister();
-
- STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
- // Current stack layout:
- // - esp[12] -- value
- // - esp[8] -- slot
- // - esp[4] -- vector
- // - esp[0] -- return address
-
- Register return_address = StoreWithVectorDescriptor::SlotRegister();
- __ pop(return_address);
- __ push(receiver);
- __ push(name);
- __ push(return_address);
-}
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
@@ -104,9 +61,7 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- static_cast<void*>(address),
- static_cast<void*>(test_instruction_address), delta);
+ LOG(isolate, PatchIC(address, test_instruction_address, delta));
}
// Patch with a short conditional jump. Enabling means switching from a short
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/identity-map.cc
index 58dbf6b1cc..9fee8b93d5 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -6,7 +6,6 @@
#include "src/base/functional.h"
#include "src/heap/heap-inl.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -14,42 +13,45 @@ namespace internal {
static const int kInitialIdentityMapSize = 4;
static const int kResizeFactor = 4;
-IdentityMapBase::~IdentityMapBase() { Clear(); }
+IdentityMapBase::~IdentityMapBase() {
+ // Clear must be called by the subclass to avoid calling the virtual
+ // DeleteArray function from the destructor.
+ DCHECK_NULL(keys_);
+}
void IdentityMapBase::Clear() {
if (keys_) {
+ DCHECK(!is_iterable());
heap_->UnregisterStrongRoots(keys_);
+ DeleteArray(keys_);
+ DeleteArray(values_);
keys_ = nullptr;
values_ = nullptr;
size_ = 0;
+ capacity_ = 0;
mask_ = 0;
}
}
-IdentityMapBase::RawEntry IdentityMapBase::Lookup(Object* key) {
- int index = LookupIndex(key);
- return index >= 0 ? &values_[index] : nullptr;
-}
-
-
-IdentityMapBase::RawEntry IdentityMapBase::Insert(Object* key) {
- int index = InsertIndex(key);
- DCHECK_GE(index, 0);
- return &values_[index];
+void IdentityMapBase::EnableIteration() {
+ CHECK(!is_iterable());
+ is_iterable_ = true;
}
+void IdentityMapBase::DisableIteration() {
+ CHECK(is_iterable());
+ is_iterable_ = false;
-int IdentityMapBase::Hash(Object* address) {
- CHECK_NE(address, heap_->not_mapped_symbol());
- uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
- return static_cast<int>(hasher_(raw_address));
+ // We might need to resize due to deletions made during iteration; do this now.
+ if (size_ * kResizeFactor < capacity_ / kResizeFactor) {
+ Resize(capacity_ / kResizeFactor);
+ }
}
-
-int IdentityMapBase::LookupIndex(Object* address) {
+int IdentityMapBase::ScanKeysFor(Object* address) const {
int start = Hash(address) & mask_;
Object* not_mapped = heap_->not_mapped_symbol();
- for (int index = start; index < size_; index++) {
+ for (int index = start; index < capacity_; index++) {
if (keys_[index] == address) return index; // Found.
if (keys_[index] == not_mapped) return -1; // Not found.
}
@@ -60,12 +62,11 @@ int IdentityMapBase::LookupIndex(Object* address) {
return -1;
}
-
-int IdentityMapBase::InsertIndex(Object* address) {
+int IdentityMapBase::InsertKey(Object* address) {
Object* not_mapped = heap_->not_mapped_symbol();
while (true) {
int start = Hash(address) & mask_;
- int limit = size_ / 2;
+ int limit = capacity_ / 2;
// Search up to {limit} entries.
for (int index = start; --limit > 0; index = (index + 1) & mask_) {
if (keys_[index] == address) return index; // Found.
@@ -74,72 +75,162 @@ int IdentityMapBase::InsertIndex(Object* address) {
return index;
}
}
- Resize(); // Should only have to resize once, since we grow 4x.
+ // Should only have to resize once, since we grow 4x.
+ Resize(capacity_ * kResizeFactor);
}
UNREACHABLE();
return -1;
}
+void* IdentityMapBase::DeleteIndex(int index) {
+ void* ret_value = values_[index];
+ Object* not_mapped = heap_->not_mapped_symbol();
+ DCHECK_NE(keys_[index], not_mapped);
+ keys_[index] = not_mapped;
+ values_[index] = nullptr;
+ size_--;
+ DCHECK_GE(size_, 0);
+
+ if (!is_iterable() && (size_ * kResizeFactor < capacity_ / kResizeFactor)) {
+ Resize(capacity_ / kResizeFactor);
+ return ret_value; // No need to fix collisions as resize reinserts keys.
+ }
+
+ // Move any collisions to their new correct location.
+ int next_index = index;
+ for (;;) {
+ next_index = (next_index + 1) & mask_;
+ Object* key = keys_[next_index];
+ if (key == not_mapped) break;
+
+ int expected_index = Hash(key) & mask_;
+ if (index < next_index) {
+ if (index < expected_index && expected_index <= next_index) continue;
+ } else {
+ DCHECK_GT(index, next_index);
+ if (index < expected_index || expected_index <= next_index) continue;
+ }
+ DCHECK_EQ(not_mapped, keys_[index]);
+ DCHECK_NULL(values_[index]);
+ std::swap(keys_[index], keys_[next_index]);
+ std::swap(values_[index], values_[next_index]);
+ index = next_index;
+ }
+
+ return ret_value;
+}
+
+int IdentityMapBase::Lookup(Object* key) const {
+ int index = ScanKeysFor(key);
+ if (index < 0 && gc_counter_ != heap_->gc_count()) {
+ // Miss; rehash if there was a GC, then lookup again.
+ const_cast<IdentityMapBase*>(this)->Rehash();
+ index = ScanKeysFor(key);
+ }
+ return index;
+}
+
+int IdentityMapBase::LookupOrInsert(Object* key) {
+ // Perform an optimistic lookup.
+ int index = ScanKeysFor(key);
+ if (index < 0) {
+ // Miss; rehash if there was a GC, then insert.
+ if (gc_counter_ != heap_->gc_count()) Rehash();
+ index = InsertKey(key);
+ size_++;
+ DCHECK_LE(size_, capacity_);
+ }
+ DCHECK_GE(index, 0);
+ return index;
+}
+
+int IdentityMapBase::Hash(Object* address) const {
+ CHECK_NE(address, heap_->not_mapped_symbol());
+ uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
+ return static_cast<int>(hasher_(raw_address));
+}
// Searches this map for the given key using the object's address
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => a pointer to a new storage location for the value
IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Object* key) {
- RawEntry result;
- if (size_ == 0) {
+ CHECK(!is_iterable()); // Don't allow insertion while iterable.
+ if (capacity_ == 0) {
// Allocate the initial storage for keys and values.
- size_ = kInitialIdentityMapSize;
+ capacity_ = kInitialIdentityMapSize;
mask_ = kInitialIdentityMapSize - 1;
gc_counter_ = heap_->gc_count();
- keys_ = zone_->NewArray<Object*>(size_);
+ keys_ = reinterpret_cast<Object**>(NewPointerArray(capacity_));
Object* not_mapped = heap_->not_mapped_symbol();
- for (int i = 0; i < size_; i++) keys_[i] = not_mapped;
- values_ = zone_->NewArray<void*>(size_);
- memset(values_, 0, sizeof(void*) * size_);
-
- heap_->RegisterStrongRoots(keys_, keys_ + size_);
- result = Insert(key);
- } else {
- // Perform an optimistic lookup.
- result = Lookup(key);
- if (result == nullptr) {
- // Miss; rehash if there was a GC, then insert.
- if (gc_counter_ != heap_->gc_count()) Rehash();
- result = Insert(key);
- }
+ for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
+ values_ = NewPointerArray(capacity_);
+ memset(values_, 0, sizeof(void*) * capacity_);
+
+ heap_->RegisterStrongRoots(keys_, keys_ + capacity_);
}
- return result;
+ int index = LookupOrInsert(key);
+ return &values_[index];
}
-
// Searches this map for the given key using the object's address
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => {nullptr}
-IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Object* key) {
+IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Object* key) const {
+ // Don't allow find by key while iterable (might rehash).
+ CHECK(!is_iterable());
if (size_ == 0) return nullptr;
+ // Remove constness since lookup might have to rehash.
+ int index = Lookup(key);
+ return index >= 0 ? &values_[index] : nullptr;
+}
- RawEntry result = Lookup(key);
- if (result == nullptr && gc_counter_ != heap_->gc_count()) {
- Rehash(); // Rehash is expensive, so only do it in case of a miss.
- result = Lookup(key);
- }
- return result;
+// Deletes the given key from the map using the object's address as the
+// identity, returning:
+// found => the value
+// not found => {nullptr}
+void* IdentityMapBase::DeleteEntry(Object* key) {
+ CHECK(!is_iterable()); // Don't allow deletion by key while iterable.
+ if (size_ == 0) return nullptr;
+ int index = Lookup(key);
+ if (index < 0) return nullptr; // No entry found.
+ return DeleteIndex(index);
}
+IdentityMapBase::RawEntry IdentityMapBase::EntryAtIndex(int index) const {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, capacity_);
+ DCHECK_NE(keys_[index], heap_->not_mapped_symbol());
+ CHECK(is_iterable()); // Must be iterable to access by index.
+ return &values_[index];
+}
+
+int IdentityMapBase::NextIndex(int index) const {
+ DCHECK_LE(-1, index);
+ DCHECK_LE(index, capacity_);
+ CHECK(is_iterable()); // Must be iterable to access by index.
+ Object* not_mapped = heap_->not_mapped_symbol();
+ for (index++; index < capacity_; index++) {
+ if (keys_[index] != not_mapped) {
+ return index;
+ }
+ }
+ return capacity_;
+}
void IdentityMapBase::Rehash() {
+ CHECK(!is_iterable()); // Can't rehash while iterating.
// Record the current GC counter.
gc_counter_ = heap_->gc_count();
// Assume that most objects won't be moved.
- ZoneVector<std::pair<Object*, void*>> reinsert(zone_);
+ std::vector<std::pair<Object*, void*>> reinsert;
// Search the table looking for keys that wouldn't be found with their
// current hashcode and evacuate them.
int last_empty = -1;
Object* not_mapped = heap_->not_mapped_symbol();
- for (int i = 0; i < size_; i++) {
+ for (int i = 0; i < capacity_; i++) {
if (keys_[i] == not_mapped) {
last_empty = i;
} else {
@@ -155,42 +246,45 @@ void IdentityMapBase::Rehash() {
}
// Reinsert all the key/value pairs that were in the wrong place.
for (auto pair : reinsert) {
- int index = InsertIndex(pair.first);
+ int index = InsertKey(pair.first);
DCHECK_GE(index, 0);
- DCHECK_NE(heap_->not_mapped_symbol(), values_[index]);
values_[index] = pair.second;
}
}
-
-void IdentityMapBase::Resize() {
- // Grow the internal storage and reinsert all the key/value pairs.
- int old_size = size_;
+void IdentityMapBase::Resize(int new_capacity) {
+ CHECK(!is_iterable()); // Can't resize while iterating.
+ // Resize the internal storage and reinsert all the key/value pairs.
+ DCHECK_GT(new_capacity, size_);
+ int old_capacity = capacity_;
Object** old_keys = keys_;
void** old_values = values_;
- size_ = size_ * kResizeFactor;
- mask_ = size_ - 1;
+ capacity_ = new_capacity;
+ mask_ = capacity_ - 1;
gc_counter_ = heap_->gc_count();
- CHECK_LE(size_, (1024 * 1024 * 16)); // that would be extreme...
-
- keys_ = zone_->NewArray<Object*>(size_);
+ keys_ = reinterpret_cast<Object**>(NewPointerArray(capacity_));
Object* not_mapped = heap_->not_mapped_symbol();
- for (int i = 0; i < size_; i++) keys_[i] = not_mapped;
- values_ = zone_->NewArray<void*>(size_);
- memset(values_, 0, sizeof(void*) * size_);
+ for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
+ values_ = NewPointerArray(capacity_);
+ memset(values_, 0, sizeof(void*) * capacity_);
- for (int i = 0; i < old_size; i++) {
+ for (int i = 0; i < old_capacity; i++) {
if (old_keys[i] == not_mapped) continue;
- int index = InsertIndex(old_keys[i]);
+ int index = InsertKey(old_keys[i]);
DCHECK_GE(index, 0);
values_[index] = old_values[i];
}
// Unregister old keys and register new keys.
heap_->UnregisterStrongRoots(old_keys);
- heap_->RegisterStrongRoots(keys_, keys_ + size_);
+ heap_->RegisterStrongRoots(keys_, keys_ + capacity_);
+
+ // Delete old storage.
+ DeleteArray(old_keys);
+ DeleteArray(old_values);
}
+
} // namespace internal
} // namespace v8
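
Note: DeleteIndex above implements backward-shift deletion for linear probing: after emptying a slot, it walks the probe chain and pulls back any entry whose home bucket can no longer reach it past the new hole. A self-contained sketch of the same fix-up, assuming a power-of-two capacity and 0 as the empty sentinel (illustrative, not V8 code):

#include <cstdint>
#include <vector>

// Minimal open-addressed key table mirroring the collision fix-up loop in
// IdentityMapBase::DeleteIndex. Keys are nonzero; 0 marks an empty slot.
struct ProbeTableSketch {
  std::vector<uintptr_t> keys;  // capacity is a power of two
  size_t mask;                  // capacity - 1

  size_t Home(uintptr_t key) const { return key & mask; }

  void Delete(size_t index) {
    keys[index] = 0;
    size_t next = index;
    for (;;) {
      next = (next + 1) & mask;
      uintptr_t key = keys[next];
      if (key == 0) break;  // end of the probe chain, nothing to fix
      size_t home = Home(key);
      // Leave the entry where it is if its home bucket still reaches it
      // without crossing the hole at {index}; same test as the V8 loop.
      bool still_reachable = (index < next)
                                 ? (index < home && home <= next)
                                 : (index < home || home <= next);
      if (still_reachable) continue;
      keys[index] = key;  // pull the entry back into the hole
      keys[next] = 0;     // and continue fixing from its old slot
      index = next;
    }
  }
};
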
diff --git a/deps/v8/src/identity-map.h b/deps/v8/src/identity-map.h
index ad2a260769..5fa223a964 100644
--- a/deps/v8/src/identity-map.h
+++ b/deps/v8/src/identity-map.h
@@ -13,11 +13,16 @@ namespace internal {
// Forward declarations.
class Heap;
-class Zone;
// Base class of identity maps contains shared code for all template
// instantions.
class IdentityMapBase {
+ public:
+ bool empty() const { return size_ == 0; }
+ int size() const { return size_; }
+ int capacity() const { return capacity_; }
+ bool is_iterable() const { return is_iterable_; }
+
protected:
// Allow Tester to access internals, including changing the address of objects
// within the {keys_} array in order to simulate a moving GC.
@@ -25,51 +30,68 @@ class IdentityMapBase {
typedef void** RawEntry;
- IdentityMapBase(Heap* heap, Zone* zone)
+ explicit IdentityMapBase(Heap* heap)
: heap_(heap),
- zone_(zone),
gc_counter_(-1),
size_(0),
+ capacity_(0),
mask_(0),
keys_(nullptr),
- values_(nullptr) {}
- ~IdentityMapBase();
+ values_(nullptr),
+ is_iterable_(false) {}
+ virtual ~IdentityMapBase();
RawEntry GetEntry(Object* key);
- RawEntry FindEntry(Object* key);
+ RawEntry FindEntry(Object* key) const;
+ void* DeleteEntry(Object* key);
+ void* DeleteIndex(int index);
void Clear();
+ V8_EXPORT_PRIVATE RawEntry EntryAtIndex(int index) const;
+ V8_EXPORT_PRIVATE int NextIndex(int index) const;
+
+ void EnableIteration();
+ void DisableIteration();
+
+ virtual void** NewPointerArray(size_t length) = 0;
+ virtual void DeleteArray(void* array) = 0;
+
private:
// Internal implementation should not be called directly by subclasses.
- int LookupIndex(Object* address);
- int InsertIndex(Object* address);
+ int ScanKeysFor(Object* address) const;
+ int InsertKey(Object* address);
+ int Lookup(Object* key) const;
+ int LookupOrInsert(Object* key);
void Rehash();
- void Resize();
- RawEntry Lookup(Object* key);
- RawEntry Insert(Object* key);
- int Hash(Object* address);
+ void Resize(int new_capacity);
+ int Hash(Object* address) const;
base::hash<uintptr_t> hasher_;
Heap* heap_;
- Zone* zone_;
int gc_counter_;
int size_;
+ int capacity_;
int mask_;
Object** keys_;
void** values_;
+ bool is_iterable_;
+
+ DISALLOW_COPY_AND_ASSIGN(IdentityMapBase);
};
// Implements an identity map from object addresses to a given value type {V}.
// The map is robust w.r.t. garbage collection by synchronization with the
// supplied {heap}.
// * Keys are treated as strong roots.
-// * SMIs are valid keys, except SMI #0.
// * The value type {V} must be reinterpret_cast'able to {void*}
// * The value type {V} must not be a heap type.
-template <typename V>
+template <typename V, class AllocationPolicy>
class IdentityMap : public IdentityMapBase {
public:
- IdentityMap(Heap* heap, Zone* zone) : IdentityMapBase(heap, zone) {}
+ explicit IdentityMap(Heap* heap,
+ AllocationPolicy allocator = AllocationPolicy())
+ : IdentityMapBase(heap), allocator_(allocator) {}
+ ~IdentityMap() override { Clear(); }
// Searches this map for the given key using the object's address
// as the identity, returning:
@@ -82,16 +104,77 @@ class IdentityMap : public IdentityMapBase {
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => {nullptr}
- V* Find(Handle<Object> key) { return Find(*key); }
- V* Find(Object* key) { return reinterpret_cast<V*>(FindEntry(key)); }
+ V* Find(Handle<Object> key) const { return Find(*key); }
+ V* Find(Object* key) const { return reinterpret_cast<V*>(FindEntry(key)); }
// Set the value for the given key.
void Set(Handle<Object> key, V v) { Set(*key, v); }
void Set(Object* key, V v) { *(reinterpret_cast<V*>(GetEntry(key))) = v; }
+ V Delete(Handle<Object> key) { return Delete(*key); }
+ V Delete(Object* key) { return reinterpret_cast<V>(DeleteEntry(key)); }
+
// Removes all elements from the map.
void Clear() { IdentityMapBase::Clear(); }
+
+ // Iterator over IdentityMap. The IteratableScope used to create this Iterator
+ // must be live for the duration of the iteration.
+ class Iterator {
+ public:
+ Iterator& operator++() {
+ index_ = map_->NextIndex(index_);
+ return *this;
+ }
+
+ Iterator& DeleteAndIncrement() {
+ map_->DeleteIndex(index_);
+ index_ = map_->NextIndex(index_);
+ return *this;
+ }
+
+ V* operator*() { return reinterpret_cast<V*>(map_->EntryAtIndex(index_)); }
+ V* operator->() { return reinterpret_cast<V*>(map_->EntryAtIndex(index_)); }
+ bool operator!=(const Iterator& other) { return index_ != other.index_; }
+
+ private:
+ Iterator(IdentityMap* map, int index) : map_(map), index_(index) {}
+
+ IdentityMap* map_;
+ int index_;
+
+ friend class IdentityMap;
+ };
+
+ class IteratableScope {
+ public:
+ explicit IteratableScope(IdentityMap* map) : map_(map) {
+ CHECK(!map_->is_iterable());
+ map_->EnableIteration();
+ }
+ ~IteratableScope() {
+ CHECK(map_->is_iterable());
+ map_->DisableIteration();
+ }
+
+ Iterator begin() { return Iterator(map_, map_->NextIndex(-1)); }
+ Iterator end() { return Iterator(map_, map_->capacity()); }
+
+ private:
+ IdentityMap* map_;
+ DISALLOW_COPY_AND_ASSIGN(IteratableScope);
+ };
+
+ protected:
+ void** NewPointerArray(size_t length) override {
+ return static_cast<void**>(allocator_.New(sizeof(void*) * length));
+ }
+ void DeleteArray(void* array) override { allocator_.Delete(array); }
+
+ private:
+ AllocationPolicy allocator_;
+ DISALLOW_COPY_AND_ASSIGN(IdentityMap);
};
+
} // namespace internal
} // namespace v8
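
Note: the new iteration API is deliberately scoped; EnableIteration/DisableIteration forbid rehashing while a walk is in progress, and deferred shrinking happens when the scope closes. A usage sketch, assuming an existing heap, an Object* key, and a hypothetical DefaultAllocationPolicy with New(size_t)/Delete(void*):

// Sketch only; DefaultAllocationPolicy is an assumed name, not from this patch.
IdentityMap<int, DefaultAllocationPolicy> map(heap);
map.Set(key, 41);
{
  IdentityMap<int, DefaultAllocationPolicy>::IteratableScope scope(&map);
  for (auto it = scope.begin(); it != scope.end(); ++it) {
    int* slot = *it;  // pointer into the map's value storage
    *slot += 1;       // values may be updated in place while iterating
    // it.DeleteAndIncrement() would erase the current entry instead of ++it.
  }
}  // ~IteratableScope() calls DisableIteration(), which may shrink the table
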
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 748a7c12d9..2d77fb7aa7 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -1,5 +1,6 @@
include_rules = [
"-src",
+ "-include/v8-debug.h",
"+src/base/atomicops.h",
"+src/base/macros.h",
"+src/base/logging.h",
@@ -7,6 +8,6 @@ include_rules = [
"+src/conversions.h",
"+src/inspector",
"+src/tracing",
- "-include/v8-debug.h",
"+src/debug/debug-interface.h",
+ "+src/debug/interface-types.h",
]
diff --git a/deps/v8/src/inspector/debugger-script.js b/deps/v8/src/inspector/debugger-script.js
index 7843dc9d67..d9cb12a09a 100644
--- a/deps/v8/src/inspector/debugger-script.js
+++ b/deps/v8/src/inspector/debugger-script.js
@@ -102,56 +102,13 @@ DebuggerScript.getGeneratorScopes = function(gen)
}
/**
- * @param {Object} object
- * @return {?RawLocation}
- */
-DebuggerScript.getGeneratorObjectLocation = function(object)
-{
- var mirror = MakeMirror(object, true /* transient */);
- if (!mirror.isGenerator())
- return null;
- var generatorMirror = /** @type {!GeneratorMirror} */(mirror);
- var funcMirror = generatorMirror.func();
- if (!funcMirror.resolved())
- return null;
- var location = generatorMirror.sourceLocation() || funcMirror.sourceLocation();
- var script = funcMirror.script();
- if (script && location) {
- return {
- scriptId: "" + script.id(),
- lineNumber: location.line,
- columnNumber: location.column
- };
- }
- return null;
-}
-
-/**
- * @param {Object} object
- * @return {!Array<!{value: *}>|undefined}
- */
-DebuggerScript.getCollectionEntries = function(object)
-{
- var mirror = MakeMirror(object, true /* transient */);
- if (mirror.isMap())
- return /** @type {!MapMirror} */(mirror).entries();
- if (mirror.isSet() || mirror.isIterator()) {
- var result = [];
- var values = mirror.isSet() ? /** @type {!SetMirror} */(mirror).values() : /** @type {!IteratorMirror} */(mirror).preview();
- for (var i = 0; i < values.length; ++i)
- result.push({ value: values[i] });
- return result;
- }
-}
-
-/**
* @param {!ExecutionState} execState
* @param {!BreakpointInfo} info
* @return {string|undefined}
*/
DebuggerScript.setBreakpoint = function(execState, info)
{
- var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.Statement);
+ var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.BreakPosition);
var locations = Debug.findBreakPointActualLocations(breakId);
if (!locations.length)
return undefined;
@@ -230,20 +187,10 @@ DebuggerScript.clearBreakpoints = function(execState)
}
/**
- * @param {!ExecutionState} execState
- * @param {!{enabled: boolean}} info
- */
-DebuggerScript.setBreakpointsActivated = function(execState, info)
-{
- Debug.debuggerFlags().breakPointsActive.setValue(info.enabled);
-}
-
-/**
- * @param {!BreakEvent} eventData
+ * @param {!Array<!BreakPoint>|undefined} breakpoints
*/
-DebuggerScript.getBreakpointNumbers = function(eventData)
+DebuggerScript.getBreakpointNumbers = function(breakpoints)
{
- var breakpoints = eventData.breakPointsHit();
var numbers = [];
if (!breakpoints)
return numbers;
@@ -391,8 +338,8 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
details = {
"functionName": ensureFuncMirror().debugName(),
"location": {
- "lineNumber": line(),
- "columnNumber": column(),
+ "lineNumber": ensureLocation().line,
+ "columnNumber": ensureLocation().column,
"scriptId": String(script.id())
},
"this": thisObject,
@@ -453,22 +400,6 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
/**
* @return {number}
*/
- function line()
- {
- return ensureLocation().line;
- }
-
- /**
- * @return {number}
- */
- function column()
- {
- return ensureLocation().column;
- }
-
- /**
- * @return {number}
- */
function contextId()
{
var mirror = ensureFuncMirror();
@@ -479,21 +410,13 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
}
/**
- * @return {number}
- */
- function sourceID()
- {
- var script = ensureScriptMirror();
- return script.id();
- }
-
- /**
* @param {string} expression
+ * @param {boolean} throwOnSideEffect
* @return {*}
*/
- function evaluate(expression)
+ function evaluate(expression, throwOnSideEffect)
{
- return frameMirror.evaluate(expression).value();
+ return frameMirror.evaluate(expression, throwOnSideEffect).value();
}
/** @return {undefined} */
@@ -516,9 +439,6 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
}
return {
- "sourceID": sourceID,
- "line": line,
- "column": column,
"contextId": contextId,
"thisObject": thisObject,
"evaluate": evaluate,
@@ -549,7 +469,7 @@ DebuggerScript._buildScopeObject = function(scopeType, scopeObject)
// the same properties.
// Reset scope object prototype to null so that the proto properties
// don't appear in the local scope section.
- var properties = /** @type {!ObjectMirror} */(MakeMirror(scopeObject, true /* transient */)).properties();
+ var properties = /** @type {!ObjectMirror} */(MakeMirror(scopeObject)).properties();
// Almost always Script scope will be empty, so just filter out that noise.
// Also drop empty Block, Eval and Script scopes, should we get any.
if (!properties.length && (scopeType === ScopeType.Script ||
@@ -574,8 +494,5 @@ DebuggerScript._buildScopeObject = function(scopeType, scopeObject)
return result;
}
-// We never resolve Mirror by its handle so to avoid memory leaks caused by Mirrors in the cache we disable it.
-ToggleMirrorCache(false);
-
return DebuggerScript;
})();
diff --git a/deps/v8/src/inspector/debugger_script_externs.js b/deps/v8/src/inspector/debugger_script_externs.js
index 4fa3a0fbe3..6f36fb9c41 100644
--- a/deps/v8/src/inspector/debugger_script_externs.js
+++ b/deps/v8/src/inspector/debugger_script_externs.js
@@ -29,12 +29,9 @@ var RawLocation;
var JavaScriptCallFrameDetails;
/** @typedef {{
- sourceID: function():(number),
- line: function():number,
- column: function():number,
contextId: function():number,
thisObject: !Object,
- evaluate: function(string):*,
+ evaluate: function(string, boolean):*,
restart: function():undefined,
setVariableValue: function(number, string, *):undefined,
isAtReturn: boolean,
@@ -75,10 +72,6 @@ Debug.findBreakPointActualLocations = function(breakId) {}
*/
Debug.findBreakPoint = function(breakId, remove) {}
-/** @return {!DebuggerFlags} */
-Debug.debuggerFlags = function() {}
-
-
/** @enum */
const BreakPositionAlignment = {
Statement: 0,
@@ -86,32 +79,6 @@ const BreakPositionAlignment = {
};
Debug.BreakPositionAlignment = BreakPositionAlignment;
-/** @enum */
-Debug.StepAction = { StepOut: 0,
- StepNext: 1,
- StepIn: 2,
- StepFrame: 3 };
-
-/** @enum */
-const ScriptCompilationType = { Host: 0,
- Eval: 1,
- JSON: 2 };
-Debug.ScriptCompilationType = ScriptCompilationType;
-
-
-/** @interface */
-function DebuggerFlag() {}
-
-/** @param {boolean} value */
-DebuggerFlag.prototype.setValue = function(value) {}
-
-
-/** @typedef {{
- * breakPointsActive: !DebuggerFlag
- * }}
- */
-var DebuggerFlags;
-
/** @const */
var LiveEdit = {}
@@ -160,13 +127,6 @@ BreakPoint.prototype.number = function() {}
/** @interface */
-function BreakEvent() {}
-
-/** @return {!Array<!BreakPoint>|undefined} */
-BreakEvent.prototype.breakPointsHit = function() {}
-
-
-/** @interface */
function ExecutionState() {}
/**
@@ -257,16 +217,11 @@ FrameDetails.prototype.returnValue = function() {}
/** @return {number} */
FrameDetails.prototype.scopeCount = function() {}
-
-/** @param {boolean} value */
-function ToggleMirrorCache(value) {}
-
/**
* @param {*} value
- * @param {boolean=} transient
* @return {!Mirror}
*/
-function MakeMirror(value, transient) {}
+function MakeMirror(value) {}
/** @interface */
@@ -278,16 +233,6 @@ Mirror.prototype.isFunction = function() {}
/** @return {boolean} */
Mirror.prototype.isGenerator = function() {}
-/** @return {boolean} */
-Mirror.prototype.isMap = function() {}
-
-/** @return {boolean} */
-Mirror.prototype.isSet = function() {}
-
-/** @return {boolean} */
-Mirror.prototype.isIterator = function() {}
-
-
/**
* @interface
* @extends {Mirror}
@@ -337,61 +282,12 @@ FunctionMirror.prototype.context = function() {}
*/
function UnresolvedFunctionMirror(value) {}
-
-/**
- * @interface
- * @extends {ObjectMirror}
- */
-function MapMirror () {}
-
-/**
- * @param {number=} limit
- * @return {!Array<!{key: *, value: *}>}
- */
-MapMirror.prototype.entries = function(limit) {}
-
-
-/**
- * @interface
- * @extends {ObjectMirror}
- */
-function SetMirror () {}
-
-/**
- * @param {number=} limit
- * @return {!Array<*>}
- */
-SetMirror.prototype.values = function(limit) {}
-
-
-/**
- * @interface
- * @extends {ObjectMirror}
- */
-function IteratorMirror () {}
-
-/**
- * @param {number=} limit
- * @return {!Array<*>}
- */
-IteratorMirror.prototype.preview = function(limit) {}
-
-
/**
* @interface
* @extends {ObjectMirror}
*/
function GeneratorMirror () {}
-/** @return {string} */
-GeneratorMirror.prototype.status = function() {}
-
-/** @return {!SourceLocation|undefined} */
-GeneratorMirror.prototype.sourceLocation = function() {}
-
-/** @return {!FunctionMirror} */
-GeneratorMirror.prototype.func = function() {}
-
/** @return {number} */
GeneratorMirror.prototype.scopeCount = function() {}
@@ -437,8 +333,9 @@ FrameMirror.prototype.script = function() {}
/**
* @param {string} source
+ * @param {boolean} throwOnSideEffect
*/
-FrameMirror.prototype.evaluate = function(source) {}
+FrameMirror.prototype.evaluate = function(source, throwOnSideEffect) {}
FrameMirror.prototype.restart = function() {}
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
index b52277a8eb..a828b76e4a 100644
--- a/deps/v8/src/inspector/injected-script-source.js
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -157,11 +157,11 @@ function isSymbol(obj)
* @type {!Object<string, !Object<string, boolean>>}
* @const
*/
-var domAttributesWithObservableSideEffectOnGet = nullifyObjectProto({});
-domAttributesWithObservableSideEffectOnGet["Request"] = nullifyObjectProto({});
-domAttributesWithObservableSideEffectOnGet["Request"]["body"] = true;
-domAttributesWithObservableSideEffectOnGet["Response"] = nullifyObjectProto({});
-domAttributesWithObservableSideEffectOnGet["Response"]["body"] = true;
+var domAttributesWithObservableSideEffectOnGet = {
+ Request: { body: true, __proto__: null },
+ Response: { body: true, __proto__: null },
+ __proto__: null
+}
/**
* @param {!Object} object
@@ -186,6 +186,7 @@ function doesAttributeHaveObservableSideEffectOnGet(object, attribute)
var InjectedScript = function()
{
}
+InjectedScriptHost.nullifyPrototype(InjectedScript);
/**
* @type {!Object.<string, boolean>}
diff --git a/deps/v8/src/inspector/injected_script_externs.js b/deps/v8/src/inspector/injected_script_externs.js
index b6339c6eb0..14b14e6bbf 100644
--- a/deps/v8/src/inspector/injected_script_externs.js
+++ b/deps/v8/src/inspector/injected_script_externs.js
@@ -9,6 +9,11 @@ function InjectedScriptHostClass()
/**
* @param {*} obj
+ */
+InjectedScriptHostClass.prototype.nullifyPrototype = function(obj) {}
+
+/**
+ * @param {*} obj
* @return {string}
*/
InjectedScriptHostClass.prototype.internalConstructorName = function(obj) {}
diff --git a/deps/v8/src/inspector/java-script-call-frame.cc b/deps/v8/src/inspector/java-script-call-frame.cc
index f9d0585a8e..9847944243 100644
--- a/deps/v8/src/inspector/java-script-call-frame.cc
+++ b/deps/v8/src/inspector/java-script-call-frame.cc
@@ -61,18 +61,6 @@ int JavaScriptCallFrame::callV8FunctionReturnInt(const char* name) const {
return result.As<v8::Int32>()->Value();
}
-int JavaScriptCallFrame::sourceID() const {
- return callV8FunctionReturnInt("sourceID");
-}
-
-int JavaScriptCallFrame::line() const {
- return callV8FunctionReturnInt("line");
-}
-
-int JavaScriptCallFrame::column() const {
- return callV8FunctionReturnInt("column");
-}
-
int JavaScriptCallFrame::contextId() const {
return callV8FunctionReturnInt("contextId");
}
@@ -110,7 +98,7 @@ v8::MaybeLocal<v8::Object> JavaScriptCallFrame::details() const {
}
v8::MaybeLocal<v8::Value> JavaScriptCallFrame::evaluate(
- v8::Local<v8::Value> expression) {
+ v8::Local<v8::Value> expression, bool throwOnSideEffect) {
v8::MicrotasksScope microtasks(m_isolate,
v8::MicrotasksScope::kRunMicrotasks);
v8::Local<v8::Context> context =
@@ -120,7 +108,9 @@ v8::MaybeLocal<v8::Value> JavaScriptCallFrame::evaluate(
v8::Local<v8::Function> evalFunction = v8::Local<v8::Function>::Cast(
callFrame->Get(context, toV8StringInternalized(m_isolate, "evaluate"))
.ToLocalChecked());
- return evalFunction->Call(context, callFrame, 1, &expression);
+ v8::Local<v8::Value> argv[] = {
+ expression, v8::Boolean::New(m_isolate, throwOnSideEffect)};
+ return evalFunction->Call(context, callFrame, arraysize(argv), argv);
}
v8::MaybeLocal<v8::Value> JavaScriptCallFrame::restart() {
diff --git a/deps/v8/src/inspector/java-script-call-frame.h b/deps/v8/src/inspector/java-script-call-frame.h
index 6b73abf0ad..b3930c0818 100644
--- a/deps/v8/src/inspector/java-script-call-frame.h
+++ b/deps/v8/src/inspector/java-script-call-frame.h
@@ -49,15 +49,13 @@ class JavaScriptCallFrame {
}
~JavaScriptCallFrame();
- int sourceID() const;
- int line() const;
- int column() const;
int contextId() const;
bool isAtReturn() const;
v8::MaybeLocal<v8::Object> details() const;
- v8::MaybeLocal<v8::Value> evaluate(v8::Local<v8::Value> expression);
+ v8::MaybeLocal<v8::Value> evaluate(v8::Local<v8::Value> expression,
+ bool throwOnSideEffect);
v8::MaybeLocal<v8::Value> restart();
v8::MaybeLocal<v8::Value> setVariableValue(int scopeNumber,
v8::Local<v8::Value> variableName,
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index d0af43ded5..ef046a52f6 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -201,7 +201,8 @@
"properties": [
{ "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
{ "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
- { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
+ { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." },
+ { "name": "promiseCreationFrame", "$ref": "CallFrame", "optional": true, "experimental": true, "description": "Creation frame of the Promise which produced the next synchronous trace when resolved, if available." }
]
}
],
@@ -383,7 +384,7 @@
"name": "consoleAPICalled",
"description": "Issued when console API was called.",
"parameters": [
- { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd"], "description": "Type of the call." },
+ { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd", "count", "timeEnd"], "description": "Type of the call." },
{ "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
{ "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
{ "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
@@ -634,7 +635,8 @@
{ "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
{ "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
{ "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." }
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+ { "name": "throwOnSideEffect", "type": "boolean", "optional": true, "experimental": true, "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation." }
],
"returns": [
{ "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
@@ -692,7 +694,8 @@
{ "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
{ "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
{ "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true },
+ { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module.", "experimental": true }
],
"description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
},
@@ -709,7 +712,8 @@
{ "name": "hash", "type": "string", "description": "Content hash of the script."},
{ "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
{ "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true },
+ { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module.", "experimental": true }
],
"description": "Fired when virtual machine fails to parse the script."
},
@@ -725,7 +729,7 @@
"name": "paused",
"parameters": [
{ "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
- { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "other" ], "description": "Pause reason." },
+ { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "OOM", "other", "ambiguous" ], "description": "Pause reason." },
{ "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
{ "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
{ "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
@@ -820,6 +824,38 @@
{ "name": "line", "type": "integer", "description": "Source line number (1-based)." },
{ "name": "ticks", "type": "integer", "description": "Number of samples attributed to the source line." }
]
+ },
+ { "id": "CoverageRange",
+ "type": "object",
+ "description": "Coverage data for a source range.",
+ "properties": [
+ { "name": "startLineNumber", "type": "integer", "description": "JavaScript script line number (0-based) for the range start." },
+ { "name": "startColumnNumber", "type": "integer", "description": "JavaScript script column number (0-based) for the range start." },
+ { "name": "endLineNumber", "type": "integer", "description": "JavaScript script line number (0-based) for the range end." },
+ { "name": "endColumnNumber", "type": "integer", "description": "JavaScript script column number (0-based) for the range end." },
+ { "name": "count", "type": "integer", "description": "Collected execution count of the source range." }
+ ],
+ "experimental": true
+ },
+ { "id": "FunctionCoverage",
+ "type": "object",
+ "description": "Coverage data for a JavaScript function.",
+ "properties": [
+ { "name": "functionName", "type": "string", "description": "JavaScript function name." },
+ { "name": "ranges", "type": "array", "items": { "$ref": "CoverageRange" }, "description": "Source ranges inside the function with coverage data." }
+ ],
+ "experimental": true
+ },
+ {
+ "id": "ScriptCoverage",
+ "type": "object",
+ "description": "Coverage data for a JavaScript script.",
+ "properties": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
+ { "name": "url", "type": "string", "description": "JavaScript script name or url." },
+ { "name": "functions", "type": "array", "items": { "$ref": "FunctionCoverage" }, "description": "Functions contained in the script that has coverage data." }
+ ],
+ "experimental": true
}
],
"commands": [
@@ -844,6 +880,32 @@
"returns": [
{ "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
]
+ },
+ {
+ "name": "startPreciseCoverage",
+ "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code coverage may be incomplete. Enabling prevents running optimized code and resets execution counters.",
+ "experimental": true
+ },
+ {
+ "name": "stopPreciseCoverage",
+ "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows executing optimized code.",
+ "experimental": true
+ },
+ {
+ "name": "takePreciseCoverage",
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
+ ],
+ "description": "Collect coverage data for the current isolate, and resets execution counters. Precise code coverage needs to have started.",
+ "experimental": true
+ },
+ {
+ "name": "getBestEffortCoverage",
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
+ ],
+ "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to garbage collection.",
+ "experimental": true
}
],
"events": [
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index c484aab2ed..6f0e3d5ff5 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -32,10 +32,28 @@ class StringUtil {
return String::fromInteger(number);
}
static String fromDouble(double number) { return String::fromDouble(number); }
+ static size_t find(const String& s, const char* needle) {
+ return s.find(needle);
+ }
+ static size_t find(const String& s, const String& needle) {
+ return s.find(needle);
+ }
static const size_t kNotFound = String::kNotFound;
+ static void builderAppend(StringBuilder& builder, const String& s) {
+ builder.append(s);
+ }
+ static void builderAppend(StringBuilder& builder, UChar c) {
+ builder.append(c);
+ }
+ static void builderAppend(StringBuilder& builder, const char* s, size_t len) {
+ builder.append(s, len);
+ }
static void builderReserve(StringBuilder& builder, size_t capacity) {
builder.reserveCapacity(capacity);
}
+ static String builderToString(StringBuilder& builder) {
+ return builder.toString();
+ }
static std::unique_ptr<protocol::Value> parseJSON(const String16& json);
static std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
};
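These static shims give the generated protocol serializer one fixed surface (find, builderAppend, builderReserve, builderToString) that any embedder string class can satisfy; String16 provides it here, and the same generated code runs against Blink's string types. A sketch of the idea over std::string; MyBuilder, MyStringUtil and joinKeyValue are illustrative names, not the real v8_inspector types:

    #include <cstddef>
    #include <string>

    // Stand-ins for String16/String16Builder, for illustration only.
    struct MyBuilder { std::string data; };

    struct MyStringUtil {
      using String = std::string;
      static size_t find(const String& s, const char* needle) {
        return s.find(needle);
      }
      static void builderAppend(MyBuilder& b, const String& s) { b.data += s; }
      static void builderAppend(MyBuilder& b, char c) { b.data += c; }
      static void builderReserve(MyBuilder& b, size_t capacity) {
        b.data.reserve(capacity);
      }
      static String builderToString(MyBuilder& b) { return b.data; }
    };

    // Generated serializer code is written once against this surface:
    template <typename StringUtil, typename Builder>
    typename StringUtil::String joinKeyValue(
        Builder& builder, const typename StringUtil::String& key,
        const typename StringUtil::String& value) {
      StringUtil::builderReserve(builder, key.size() + value.size() + 1);
      StringUtil::builderAppend(builder, key);
      StringUtil::builderAppend(builder, ':');
      StringUtil::builderAppend(builder, value);
      return StringUtil::builderToString(builder);
    }

    int main() {
      MyBuilder b;
      return joinKeyValue<MyStringUtil>(b, "method", "Debugger.enable").empty();
    }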
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 281a0b1d90..73f74e4f67 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -51,9 +51,9 @@ String16 consoleAPITypeValue(ConsoleAPIType type) {
case ConsoleAPIType::kAssert:
return protocol::Runtime::ConsoleAPICalled::TypeEnum::Assert;
case ConsoleAPIType::kTimeEnd:
- return protocol::Runtime::ConsoleAPICalled::TypeEnum::Debug;
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::TimeEnd;
case ConsoleAPIType::kCount:
- return protocol::Runtime::ConsoleAPICalled::TypeEnum::Debug;
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Count;
}
return protocol::Runtime::ConsoleAPICalled::TypeEnum::Log;
}
@@ -382,22 +382,25 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
if (arguments.size())
message->m_message = V8ValueStringBuilder::toString(arguments[0], context);
- V8ConsoleAPIType clientType = V8ConsoleAPIType::kLog;
+ v8::Isolate::MessageErrorLevel clientLevel = v8::Isolate::kMessageInfo;
if (type == ConsoleAPIType::kDebug || type == ConsoleAPIType::kCount ||
- type == ConsoleAPIType::kTimeEnd)
- clientType = V8ConsoleAPIType::kDebug;
- else if (type == ConsoleAPIType::kError || type == ConsoleAPIType::kAssert)
- clientType = V8ConsoleAPIType::kError;
- else if (type == ConsoleAPIType::kWarning)
- clientType = V8ConsoleAPIType::kWarning;
- else if (type == ConsoleAPIType::kInfo)
- clientType = V8ConsoleAPIType::kInfo;
- else if (type == ConsoleAPIType::kClear)
- clientType = V8ConsoleAPIType::kClear;
- inspector->client()->consoleAPIMessage(
- contextGroupId, clientType, toStringView(message->m_message),
- toStringView(message->m_url), message->m_lineNumber,
- message->m_columnNumber, message->m_stackTrace.get());
+ type == ConsoleAPIType::kTimeEnd) {
+ clientLevel = v8::Isolate::kMessageDebug;
+ } else if (type == ConsoleAPIType::kError ||
+ type == ConsoleAPIType::kAssert) {
+ clientLevel = v8::Isolate::kMessageError;
+ } else if (type == ConsoleAPIType::kWarning) {
+ clientLevel = v8::Isolate::kMessageWarning;
+ } else if (type == ConsoleAPIType::kInfo || type == ConsoleAPIType::kLog) {
+ clientLevel = v8::Isolate::kMessageInfo;
+ }
+
+ if (type != ConsoleAPIType::kClear) {
+ inspector->client()->consoleAPIMessage(
+ contextGroupId, clientLevel, toStringView(message->m_message),
+ toStringView(message->m_url), message->m_lineNumber,
+ message->m_columnNumber, message->m_stackTrace.get());
+ }
return message;
}
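The net effect of the rewrite above: console API types now map onto the public v8::Isolate message levels rather than the inspector-private V8ConsoleAPIType, and console.clear no longer emits a client message at all. A compact sketch of the resulting mapping; the plain enums below are stand-ins for the real types:

    enum class ApiType { kLog, kDebug, kInfo, kError, kWarning,
                         kClear, kAssert, kCount, kTimeEnd };
    enum class Level { kDebug, kInfo, kWarning, kError };

    // Returns false when no client message should be emitted (console.clear).
    bool toClientLevel(ApiType type, Level* out) {
      if (type == ApiType::kClear) return false;
      if (type == ApiType::kDebug || type == ApiType::kCount ||
          type == ApiType::kTimeEnd) {
        *out = Level::kDebug;
      } else if (type == ApiType::kError || type == ApiType::kAssert) {
        *out = Level::kError;
      } else if (type == ApiType::kWarning) {
        *out = Level::kWarning;
      } else {
        *out = Level::kInfo;  // kInfo and kLog both report as info
      }
      return true;
    }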
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 3b47d2f6b4..cfe7fc1532 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -336,8 +336,14 @@ void V8Console::groupEndCallback(
}
void V8Console::clearCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
- ConsoleHelper(info).reportCallWithDefaultArgument(ConsoleAPIType::kClear,
- String16("console.clear"));
+ ConsoleHelper helper(info);
+ InspectedContext* context = helper.ensureInspectedContext();
+ if (!context) return;
+ int contextGroupId = context->contextGroupId();
+ if (V8InspectorClient* client = helper.ensureDebuggerClient())
+ client->consoleClear(contextGroupId);
+ helper.reportCallWithDefaultArgument(ConsoleAPIType::kClear,
+ String16("console.clear"));
}
void V8Console::countCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
@@ -360,8 +366,10 @@ void V8Console::countCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
if (!helper.privateMap("V8Console#countMap").ToLocal(&countMap)) return;
int32_t count = helper.getIntFromMap(countMap, identifier, 0) + 1;
helper.setIntOnMap(countMap, identifier, count);
- helper.reportCallWithArgument(ConsoleAPIType::kCount,
- title + ": " + String16::fromInteger(count));
+ String16 countString = String16::fromInteger(count);
+ helper.reportCallWithArgument(
+ ConsoleAPIType::kCount,
+ title.isEmpty() ? countString : (title + ": " + countString));
}
void V8Console::assertCallback(
@@ -431,7 +439,7 @@ static void timeEndFunction(const v8::FunctionCallbackInfo<v8::Value>& info,
double elapsed = client->currentTimeMS() -
helper.getDoubleFromMap(timeMap, protocolTitle, 0.0);
String16 message =
- protocolTitle + ": " + String16::fromDouble(elapsed, 3) + "ms";
+ protocolTitle + ": " + String16::fromDouble(elapsed) + "ms";
helper.reportCallWithArgument(ConsoleAPIType::kTimeEnd, message);
}
}
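Two user-visible formatting tweaks land in this file: console.count() with an empty label now prints just the running count instead of ": 1", and console.timeEnd() drops the forced three-decimal formatting. A small sketch of the count bookkeeping, with a std::map standing in for the private V8Console#countMap:

    #include <iostream>
    #include <map>
    #include <string>

    // Increment the per-label counter and format the reported message.
    std::string countMessage(std::map<std::string, int>& counters,
                             const std::string& title) {
      int count = ++counters[title];
      std::string countString = std::to_string(count);
      return title.empty() ? countString : (title + ": " + countString);
    }

    int main() {
      std::map<std::string, int> counters;
      std::cout << countMessage(counters, "") << "\n";       // "1"
      std::cout << countMessage(counters, "") << "\n";       // "2"
      std::cout << countMessage(counters, "fetch") << "\n";  // "fetch: 1"
    }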
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index b287d1c082..7de46a1787 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -54,7 +54,6 @@ static const char skipAllPauses[] = "skipAllPauses";
} // namespace DebuggerAgentState
-static const int kMaxSkipStepFrameCount = 128;
static const char kBacktraceObjectGroup[] = "backtrace";
static const char kDebuggerNotEnabled[] = "Debugger agent is not enabled";
static const char kDebuggerNotPaused[] =
@@ -132,24 +131,14 @@ V8DebuggerAgentImpl::V8DebuggerAgentImpl(
m_state(state),
m_frontend(frontendChannel),
m_isolate(m_inspector->isolate()),
- m_breakReason(protocol::Debugger::Paused::ReasonEnum::Other),
m_scheduledDebuggerStep(NoStep),
- m_skipNextDebuggerStepOut(false),
m_javaScriptPauseScheduled(false),
- m_steppingFromFramework(false),
- m_pausingOnNativeEvent(false),
- m_skippedStepFrameCount(0),
- m_recursionLevelForStepOut(0),
- m_recursionLevelForStepFrame(0),
- m_skipAllPauses(false) {
- clearBreakDetails();
+ m_recursionLevelForStepOut(0) {
}
V8DebuggerAgentImpl::~V8DebuggerAgentImpl() {}
void V8DebuggerAgentImpl::enableImpl() {
- // m_inspector->addListener may result in reporting all parsed scripts to
- // the agent so it should already be in enabled state by then.
m_enabled = true;
m_state->setBoolean(DebuggerAgentState::debuggerEnabled, true);
m_debugger->enable();
@@ -185,26 +174,22 @@ Response V8DebuggerAgentImpl::disable() {
v8::debug::NoBreakOnException);
m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, 0);
- if (!m_pausedContext.IsEmpty()) m_debugger->continueProgram();
+ if (isPaused()) m_debugger->continueProgram();
m_debugger->disable();
- m_pausedContext.Reset();
JavaScriptCallFrames emptyCallFrames;
m_pausedCallFrames.swap(emptyCallFrames);
- m_scripts.clear();
m_blackboxedPositions.clear();
+ m_blackboxPattern.reset();
+ resetBlackboxedStateCache();
+ m_scripts.clear();
m_breakpointIdToDebuggerBreakpointIds.clear();
m_debugger->setAsyncCallStackDepth(this, 0);
m_continueToLocationBreakpointId = String16();
clearBreakDetails();
m_scheduledDebuggerStep = NoStep;
- m_skipNextDebuggerStepOut = false;
m_javaScriptPauseScheduled = false;
- m_steppingFromFramework = false;
- m_pausingOnNativeEvent = false;
- m_skippedStepFrameCount = 0;
- m_recursionLevelForStepFrame = 0;
m_skipAllPauses = false;
- m_blackboxPattern = nullptr;
+ m_state->setBoolean(DebuggerAgentState::skipAllPauses, false);
m_state->remove(DebuggerAgentState::blackboxPattern);
m_enabled = false;
m_state->setBoolean(DebuggerAgentState::debuggerEnabled, false);
@@ -246,8 +231,8 @@ Response V8DebuggerAgentImpl::setBreakpointsActive(bool active) {
}
Response V8DebuggerAgentImpl::setSkipAllPauses(bool skip) {
+ m_state->setBoolean(DebuggerAgentState::skipAllPauses, skip);
m_skipAllPauses = skip;
- m_state->setBoolean(DebuggerAgentState::skipAllPauses, m_skipAllPauses);
return Response::OK();
}
@@ -434,28 +419,10 @@ Response V8DebuggerAgentImpl::continueToLocation(
return resume();
}
-bool V8DebuggerAgentImpl::isCurrentCallStackEmptyOrBlackboxed() {
- DCHECK(enabled());
- JavaScriptCallFrames callFrames = m_debugger->currentCallFrames();
- for (size_t index = 0; index < callFrames.size(); ++index) {
- if (!isCallFrameWithUnknownScriptOrBlackboxed(callFrames[index].get()))
- return false;
- }
- return true;
-}
-
-bool V8DebuggerAgentImpl::isTopPausedCallFrameBlackboxed() {
- DCHECK(enabled());
- JavaScriptCallFrame* frame =
- m_pausedCallFrames.size() ? m_pausedCallFrames[0].get() : nullptr;
- return isCallFrameWithUnknownScriptOrBlackboxed(frame);
-}
-
-bool V8DebuggerAgentImpl::isCallFrameWithUnknownScriptOrBlackboxed(
- JavaScriptCallFrame* frame) {
- if (!frame) return true;
- ScriptsMap::iterator it =
- m_scripts.find(String16::fromInteger(frame->sourceID()));
+bool V8DebuggerAgentImpl::isFunctionBlackboxed(const String16& scriptId,
+ const v8::debug::Location& start,
+ const v8::debug::Location& end) {
+ ScriptsMap::iterator it = m_scripts.find(scriptId);
if (it == m_scripts.end()) {
// Unknown scripts are blackboxed.
return true;
@@ -466,48 +433,24 @@ bool V8DebuggerAgentImpl::isCallFrameWithUnknownScriptOrBlackboxed(
m_blackboxPattern->match(scriptSourceURL) != -1)
return true;
}
- auto itBlackboxedPositions =
- m_blackboxedPositions.find(String16::fromInteger(frame->sourceID()));
+ auto itBlackboxedPositions = m_blackboxedPositions.find(scriptId);
if (itBlackboxedPositions == m_blackboxedPositions.end()) return false;
const std::vector<std::pair<int, int>>& ranges =
itBlackboxedPositions->second;
- auto itRange = std::lower_bound(
+ auto itStartRange = std::lower_bound(
ranges.begin(), ranges.end(),
- std::make_pair(frame->line(), frame->column()), positionComparator);
+ std::make_pair(start.GetLineNumber(), start.GetColumnNumber()),
+ positionComparator);
+ auto itEndRange = std::lower_bound(
+ itStartRange, ranges.end(),
+ std::make_pair(end.GetLineNumber(), end.GetColumnNumber()),
+ positionComparator);
// The ranges array contains positions in the script where the blackbox state changes.
// [(0,0) ... ranges[0]) isn't blackboxed, [ranges[0] ... ranges[1]) is
// blackboxed...
- return std::distance(ranges.begin(), itRange) % 2;
-}
-
-V8DebuggerAgentImpl::SkipPauseRequest
-V8DebuggerAgentImpl::shouldSkipExceptionPause(
- JavaScriptCallFrame* topCallFrame) {
- if (m_steppingFromFramework) return RequestNoSkip;
- if (isCallFrameWithUnknownScriptOrBlackboxed(topCallFrame))
- return RequestContinue;
- return RequestNoSkip;
-}
-
-V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::shouldSkipStepPause(
- JavaScriptCallFrame* topCallFrame) {
- if (m_steppingFromFramework) return RequestNoSkip;
-
- if (m_skipNextDebuggerStepOut) {
- m_skipNextDebuggerStepOut = false;
- if (m_scheduledDebuggerStep == StepOut) return RequestStepOut;
- }
-
- if (!isCallFrameWithUnknownScriptOrBlackboxed(topCallFrame))
- return RequestNoSkip;
-
- if (m_skippedStepFrameCount >= kMaxSkipStepFrameCount) return RequestStepOut;
-
- if (!m_skippedStepFrameCount) m_recursionLevelForStepFrame = 1;
-
- ++m_skippedStepFrameCount;
- return RequestStepFrame;
+ return itStartRange == itEndRange &&
+ std::distance(ranges.begin(), itStartRange) % 2;
}
std::unique_ptr<protocol::Debugger::Location>
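The parity trick above deserves spelling out: the stored vector holds the sorted positions at which the blackboxed state flips, so the index returned by lower_bound tells you which interval a position falls in, and odd-indexed intervals are blackboxed. The new start/end comparison additionally requires the whole function to sit inside one interval. A self-contained sketch of the same check:

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <utility>
    #include <vector>

    using Pos = std::pair<int, int>;  // (line, column), ordered lexicographically

    // `flips` holds the sorted positions where the blackboxed state toggles:
    // [start-of-script, flips[0]) is not blackboxed, [flips[0], flips[1]) is,
    // and so on. A range is blackboxed only if both endpoints land in the
    // same interval and that interval has an odd index.
    bool isRangeBlackboxed(const std::vector<Pos>& flips, Pos start, Pos end) {
      auto itStart = std::lower_bound(flips.begin(), flips.end(), start);
      auto itEnd = std::lower_bound(itStart, flips.end(), end);
      return itStart == itEnd && std::distance(flips.begin(), itStart) % 2;
    }

    int main() {
      std::vector<Pos> flips = {{10, 0}, {20, 0}};  // lines [10, 20) blackboxed
      std::cout << isRangeBlackboxed(flips, {12, 0}, {15, 4}) << "\n";  // 1
      std::cout << isRangeBlackboxed(flips, {5, 0}, {12, 0}) << "\n";   // 0: spans a flip
      std::cout << isRangeBlackboxed(flips, {25, 0}, {30, 0}) << "\n";  // 0: even interval
    }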
@@ -525,6 +468,7 @@ V8DebuggerAgentImpl::resolveBreakpoint(const String16& breakpointId,
scriptIterator->second->endLine() < breakpoint.line_number)
return nullptr;
+ // Translate from protocol location to v8 location for the debugger.
ScriptBreakpoint translatedBreakpoint = breakpoint;
m_debugger->wasmTranslation()->TranslateProtocolLocationToWasmScriptLocation(
&translatedBreakpoint.script_id, &translatedBreakpoint.line_number,
@@ -536,6 +480,10 @@ V8DebuggerAgentImpl::resolveBreakpoint(const String16& breakpointId,
translatedBreakpoint, &actualLineNumber, &actualColumnNumber);
if (debuggerBreakpointId.isEmpty()) return nullptr;
+ // Translate back from v8 location to protocol location for the return value.
+ m_debugger->wasmTranslation()->TranslateWasmScriptLocationToProtocolLocation(
+ &translatedBreakpoint.script_id, &actualLineNumber, &actualColumnNumber);
+
m_serverBreakpoints[debuggerBreakpointId] =
std::make_pair(breakpointId, source);
CHECK(!breakpointId.isEmpty());
@@ -572,6 +520,15 @@ Response V8DebuggerAgentImpl::setScriptSource(
Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ ScriptsMap::iterator it = m_scripts.find(scriptId);
+ if (it == m_scripts.end()) {
+ return Response::Error("No script with given id found");
+ }
+ if (it->second->isModule()) {
+ // TODO(kozyatinskiy): LiveEdit should support ES6 module
+ return Response::Error("Editing module's script is not supported.");
+ }
+
v8::HandleScope handles(m_isolate);
v8::Local<v8::String> newSource = toV8String(m_isolate, newContent);
bool compileError = false;
@@ -580,9 +537,7 @@ Response V8DebuggerAgentImpl::setScriptSource(
&m_pausedCallFrames, stackChanged, &compileError);
if (!response.isSuccess() || compileError) return response;
- ScriptsMap::iterator it = m_scripts.find(scriptId);
- if (it != m_scripts.end()) it->second->setSource(newSource);
-
+ it->second->setSource(newSource);
std::unique_ptr<Array<CallFrame>> callFrames;
response = currentCallFrames(&callFrames);
if (!response.isSuccess()) return response;
@@ -595,7 +550,7 @@ Response V8DebuggerAgentImpl::restartFrame(
const String16& callFrameId,
std::unique_ptr<Array<CallFrame>>* newCallFrames,
Maybe<StackTrace>* asyncStackTrace) {
- if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::Error(kDebuggerNotPaused);
InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
callFrameId);
Response response = scope.initialize();
@@ -632,89 +587,89 @@ Response V8DebuggerAgentImpl::getScriptSource(const String16& scriptId,
return Response::OK();
}
+void V8DebuggerAgentImpl::pushBreakDetails(
+ const String16& breakReason,
+ std::unique_ptr<protocol::DictionaryValue> breakAuxData) {
+ m_breakReason.push_back(std::make_pair(breakReason, std::move(breakAuxData)));
+}
+
+void V8DebuggerAgentImpl::popBreakDetails() {
+ if (m_breakReason.empty()) return;
+ m_breakReason.pop_back();
+}
+
+void V8DebuggerAgentImpl::clearBreakDetails() {
+ std::vector<BreakReason> emptyBreakReason;
+ m_breakReason.swap(emptyBreakReason);
+}
+
void V8DebuggerAgentImpl::schedulePauseOnNextStatement(
const String16& breakReason,
std::unique_ptr<protocol::DictionaryValue> data) {
if (!enabled() || m_scheduledDebuggerStep == StepInto ||
- m_javaScriptPauseScheduled || m_debugger->isPaused() ||
+ m_javaScriptPauseScheduled || isPaused() ||
!m_debugger->breakpointsActivated())
return;
- m_breakReason = breakReason;
- m_breakAuxData = std::move(data);
- m_pausingOnNativeEvent = true;
- m_skipNextDebuggerStepOut = false;
- m_debugger->setPauseOnNextStatement(true);
+ if (m_breakReason.empty()) m_debugger->setPauseOnNextStatement(true);
+ pushBreakDetails(breakReason, std::move(data));
}
void V8DebuggerAgentImpl::schedulePauseOnNextStatementIfSteppingInto() {
DCHECK(enabled());
if (m_scheduledDebuggerStep != StepInto || m_javaScriptPauseScheduled ||
- m_debugger->isPaused())
+ isPaused())
return;
- clearBreakDetails();
- m_pausingOnNativeEvent = false;
- m_skippedStepFrameCount = 0;
- m_recursionLevelForStepFrame = 0;
m_debugger->setPauseOnNextStatement(true);
}
void V8DebuggerAgentImpl::cancelPauseOnNextStatement() {
- if (m_javaScriptPauseScheduled || m_debugger->isPaused()) return;
- clearBreakDetails();
- m_pausingOnNativeEvent = false;
- m_debugger->setPauseOnNextStatement(false);
+ if (m_javaScriptPauseScheduled || isPaused()) return;
+ popBreakDetails();
+ if (m_breakReason.empty()) m_debugger->setPauseOnNextStatement(false);
}
Response V8DebuggerAgentImpl::pause() {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- if (m_javaScriptPauseScheduled || m_debugger->isPaused())
- return Response::OK();
+ if (m_javaScriptPauseScheduled || isPaused()) return Response::OK();
clearBreakDetails();
m_javaScriptPauseScheduled = true;
m_scheduledDebuggerStep = NoStep;
- m_skippedStepFrameCount = 0;
- m_steppingFromFramework = false;
m_debugger->setPauseOnNextStatement(true);
return Response::OK();
}
Response V8DebuggerAgentImpl::resume() {
- if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::Error(kDebuggerNotPaused);
m_scheduledDebuggerStep = NoStep;
- m_steppingFromFramework = false;
m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->continueProgram();
return Response::OK();
}
Response V8DebuggerAgentImpl::stepOver() {
- if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::Error(kDebuggerNotPaused);
// StepOver at a function return point should fall back to StepInto.
JavaScriptCallFrame* frame =
!m_pausedCallFrames.empty() ? m_pausedCallFrames[0].get() : nullptr;
if (frame && frame->isAtReturn()) return stepInto();
m_scheduledDebuggerStep = StepOver;
- m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->stepOverStatement();
return Response::OK();
}
Response V8DebuggerAgentImpl::stepInto() {
- if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::Error(kDebuggerNotPaused);
m_scheduledDebuggerStep = StepInto;
- m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->stepIntoStatement();
return Response::OK();
}
Response V8DebuggerAgentImpl::stepOut() {
- if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::Error(kDebuggerNotPaused);
m_scheduledDebuggerStep = StepOut;
- m_skipNextDebuggerStepOut = false;
m_recursionLevelForStepOut = 1;
- m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->stepOutOfFunction();
return Response::OK();
@@ -748,9 +703,9 @@ Response V8DebuggerAgentImpl::evaluateOnCallFrame(
const String16& callFrameId, const String16& expression,
Maybe<String16> objectGroup, Maybe<bool> includeCommandLineAPI,
Maybe<bool> silent, Maybe<bool> returnByValue, Maybe<bool> generatePreview,
- std::unique_ptr<RemoteObject>* result,
+ Maybe<bool> throwOnSideEffect, std::unique_ptr<RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
- if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::Error(kDebuggerNotPaused);
InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
callFrameId);
Response response = scope.initialize();
@@ -763,7 +718,8 @@ Response V8DebuggerAgentImpl::evaluateOnCallFrame(
v8::MaybeLocal<v8::Value> maybeResultValue =
m_pausedCallFrames[scope.frameOrdinal()]->evaluate(
- toV8String(m_isolate, expression));
+ toV8String(m_isolate, expression),
+ throwOnSideEffect.fromMaybe(false));
// Re-initialize after running client's code, as it could have destroyed
// context or session.
@@ -780,7 +736,7 @@ Response V8DebuggerAgentImpl::setVariableValue(
std::unique_ptr<protocol::Runtime::CallArgument> newValueArgument,
const String16& callFrameId) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::Error(kDebuggerNotPaused);
InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
callFrameId);
Response response = scope.initialize();
@@ -811,6 +767,7 @@ Response V8DebuggerAgentImpl::setBlackboxPatterns(
std::unique_ptr<protocol::Array<String16>> patterns) {
if (!patterns->length()) {
m_blackboxPattern = nullptr;
+ resetBlackboxedStateCache();
m_state->remove(DebuggerAgentState::blackboxPattern);
return Response::OK();
}
@@ -826,6 +783,7 @@ Response V8DebuggerAgentImpl::setBlackboxPatterns(
String16 pattern = patternBuilder.toString();
Response response = setBlackboxPattern(pattern);
if (!response.isSuccess()) return response;
+ resetBlackboxedStateCache();
m_state->setString(DebuggerAgentState::blackboxPattern, pattern);
return Response::OK();
}
@@ -839,15 +797,23 @@ Response V8DebuggerAgentImpl::setBlackboxPattern(const String16& pattern) {
return Response::OK();
}
+void V8DebuggerAgentImpl::resetBlackboxedStateCache() {
+ for (const auto& it : m_scripts) {
+ it.second->resetBlackboxedStateCache();
+ }
+}
+
Response V8DebuggerAgentImpl::setBlackboxedRanges(
const String16& scriptId,
std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
inPositions) {
- if (m_scripts.find(scriptId) == m_scripts.end())
+ auto it = m_scripts.find(scriptId);
+ if (it == m_scripts.end())
return Response::Error("No script with passed id.");
if (!inPositions->length()) {
m_blackboxedPositions.erase(scriptId);
+ it->second->resetBlackboxedStateCache();
return Response::OK();
}
@@ -873,12 +839,12 @@ Response V8DebuggerAgentImpl::setBlackboxedRanges(
}
m_blackboxedPositions[scriptId] = positions;
+ it->second->resetBlackboxedStateCache();
return Response::OK();
}
void V8DebuggerAgentImpl::willExecuteScript(int scriptId) {
changeJavaScriptRecursionLevel(+1);
- // Fast return.
if (m_scheduledDebuggerStep != StepInto) return;
schedulePauseOnNextStatementIfSteppingInto();
}
@@ -888,8 +854,7 @@ void V8DebuggerAgentImpl::didExecuteScript() {
}
void V8DebuggerAgentImpl::changeJavaScriptRecursionLevel(int step) {
- if (m_javaScriptPauseScheduled && !m_skipAllPauses &&
- !m_debugger->isPaused()) {
+ if (m_javaScriptPauseScheduled && !m_skipAllPauses && !isPaused()) {
// Do not ever lose the user's pause request until we have actually paused.
m_debugger->setPauseOnNextStatement(true);
}
@@ -901,34 +866,13 @@ void V8DebuggerAgentImpl::changeJavaScriptRecursionLevel(int step) {
// switch stepping to step into the next JS task, as if we exited to a
// blackboxed framework.
m_scheduledDebuggerStep = StepInto;
- m_skipNextDebuggerStepOut = false;
- }
- }
- if (m_recursionLevelForStepFrame) {
- m_recursionLevelForStepFrame += step;
- if (!m_recursionLevelForStepFrame) {
- // We have walked through a blackboxed framework and got back to where we
- // started.
- // If there was no stepping scheduled, we should cancel the stepping
- // explicitly,
- // since there may be a scheduled StepFrame left.
- // Otherwise, if we were stepping in/over, the StepFrame will stop at the
- // right location,
- // whereas if we were stepping out, we should continue doing so after
- // debugger pauses
- // from the old StepFrame.
- m_skippedStepFrameCount = 0;
- if (m_scheduledDebuggerStep == NoStep)
- m_debugger->clearStepping();
- else if (m_scheduledDebuggerStep == StepOut)
- m_skipNextDebuggerStepOut = true;
}
}
}
Response V8DebuggerAgentImpl::currentCallFrames(
std::unique_ptr<Array<CallFrame>>* result) {
- if (m_pausedContext.IsEmpty() || !m_pausedCallFrames.size()) {
+ if (!isPaused()) {
*result = Array<CallFrame>::create();
return Response::OK();
}
@@ -1037,12 +981,14 @@ Response V8DebuggerAgentImpl::currentCallFrames(
}
std::unique_ptr<StackTrace> V8DebuggerAgentImpl::currentAsyncStackTrace() {
- if (m_pausedContext.IsEmpty()) return nullptr;
+ if (!isPaused()) return nullptr;
V8StackTraceImpl* stackTrace = m_debugger->currentAsyncCallChain();
return stackTrace ? stackTrace->buildInspectorObjectForTail(m_debugger)
: nullptr;
}
+bool V8DebuggerAgentImpl::isPaused() const { return m_debugger->isPaused(); }
+
void V8DebuggerAgentImpl::didParseSource(
std::unique_ptr<V8DebuggerScript> script, bool success) {
v8::HandleScope handles(m_isolate);
@@ -1064,6 +1010,7 @@ void V8DebuggerAgentImpl::didParseSource(
}
bool isLiveEdit = script->isLiveEdit();
bool hasSourceURL = script->hasSourceURL();
+ bool isModule = script->isModule();
String16 scriptId = script->scriptId();
String16 scriptURL = script->sourceURL();
@@ -1072,24 +1019,31 @@ void V8DebuggerAgentImpl::didParseSource(
ScriptsMap::iterator scriptIterator = m_scripts.find(scriptId);
DCHECK(scriptIterator != m_scripts.end());
V8DebuggerScript* scriptRef = scriptIterator->second.get();
+ // V8 could create functions for parsed scripts before reporting them and
+ // ask the inspector about their blackboxed state. We should reset the
+ // cached state whenever we change anything that affects the result of
+ // isFunctionBlackboxed - and adding a parsed script is such a change.
+ scriptRef->resetBlackboxedStateCache();
Maybe<String16> sourceMapURLParam = scriptRef->sourceMappingURL();
Maybe<protocol::DictionaryValue> executionContextAuxDataParam(
std::move(executionContextAuxData));
const bool* isLiveEditParam = isLiveEdit ? &isLiveEdit : nullptr;
const bool* hasSourceURLParam = hasSourceURL ? &hasSourceURL : nullptr;
+ const bool* isModuleParam = isModule ? &isModule : nullptr;
if (success)
m_frontend.scriptParsed(
scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
scriptRef->endLine(), scriptRef->endColumn(), contextId,
scriptRef->hash(m_isolate), std::move(executionContextAuxDataParam),
- isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam);
+ isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam,
+ isModuleParam);
else
m_frontend.scriptFailedToParse(
scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
scriptRef->endLine(), scriptRef->endColumn(), contextId,
scriptRef->hash(m_isolate), std::move(executionContextAuxDataParam),
- std::move(sourceMapURLParam), hasSourceURLParam);
+ std::move(sourceMapURLParam), hasSourceURLParam, isModuleParam);
if (scriptURL.isEmpty() || !success) return;
@@ -1121,63 +1075,46 @@ void V8DebuggerAgentImpl::didParseSource(
}
}
-V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
- v8::Local<v8::Context> context, v8::Local<v8::Value> exception,
- const std::vector<String16>& hitBreakpoints, bool isPromiseRejection,
- bool isUncaught) {
- JavaScriptCallFrames callFrames = m_debugger->currentCallFrames(1);
- JavaScriptCallFrame* topCallFrame =
- !callFrames.empty() ? callFrames.begin()->get() : nullptr;
-
- V8DebuggerAgentImpl::SkipPauseRequest result;
- if (m_skipAllPauses)
- result = RequestContinue;
- else if (!hitBreakpoints.empty())
- result = RequestNoSkip; // Don't skip explicit breakpoints even if set in
- // frameworks.
- else if (!exception.IsEmpty())
- result = shouldSkipExceptionPause(topCallFrame);
- else if (m_scheduledDebuggerStep != NoStep || m_javaScriptPauseScheduled ||
- m_pausingOnNativeEvent)
- result = shouldSkipStepPause(topCallFrame);
- else
- result = RequestNoSkip;
-
- m_skipNextDebuggerStepOut = false;
- if (result != RequestNoSkip) return result;
- // Skip pauses inside V8 internal scripts and on syntax errors.
- if (!topCallFrame) return RequestContinue;
-
- DCHECK(m_pausedContext.IsEmpty());
+void V8DebuggerAgentImpl::didPause(int contextId,
+ v8::Local<v8::Value> exception,
+ const std::vector<String16>& hitBreakpoints,
+ bool isPromiseRejection, bool isUncaught,
+ bool isOOMBreak) {
JavaScriptCallFrames frames = m_debugger->currentCallFrames();
m_pausedCallFrames.swap(frames);
- m_pausedContext.Reset(m_isolate, context);
v8::HandleScope handles(m_isolate);
- if (!exception.IsEmpty()) {
+ std::vector<BreakReason> hitReasons;
+
+ if (isOOMBreak) {
+ hitReasons.push_back(
+ std::make_pair(protocol::Debugger::Paused::ReasonEnum::OOM, nullptr));
+ } else if (!exception.IsEmpty()) {
InjectedScript* injectedScript = nullptr;
- m_session->findInjectedScript(InspectedContext::contextId(context),
- injectedScript);
+ m_session->findInjectedScript(contextId, injectedScript);
if (injectedScript) {
- m_breakReason =
+ String16 breakReason =
isPromiseRejection
? protocol::Debugger::Paused::ReasonEnum::PromiseRejection
: protocol::Debugger::Paused::ReasonEnum::Exception;
std::unique_ptr<protocol::Runtime::RemoteObject> obj;
injectedScript->wrapObject(exception, kBacktraceObjectGroup, false, false,
&obj);
+ std::unique_ptr<protocol::DictionaryValue> breakAuxData;
if (obj) {
- m_breakAuxData = obj->toValue();
- m_breakAuxData->setBoolean("uncaught", isUncaught);
+ breakAuxData = obj->toValue();
+ breakAuxData->setBoolean("uncaught", isUncaught);
} else {
- m_breakAuxData = nullptr;
+ breakAuxData = nullptr;
}
- // m_breakAuxData might be null after this.
+ hitReasons.push_back(
+ std::make_pair(breakReason, std::move(breakAuxData)));
}
}
std::unique_ptr<Array<String16>> hitBreakpointIds = Array<String16>::create();
+ bool hasDebugCommandBreakpointReason = false;
for (const auto& point : hitBreakpoints) {
DebugServerBreakpointToBreakpointIdAndSourceMap::iterator
breakpointIterator = m_serverBreakpoints.find(point);
@@ -1186,34 +1123,57 @@ V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
hitBreakpointIds->addItem(localId);
BreakpointSource source = breakpointIterator->second.second;
- if (m_breakReason == protocol::Debugger::Paused::ReasonEnum::Other &&
- source == DebugCommandBreakpointSource)
- m_breakReason = protocol::Debugger::Paused::ReasonEnum::DebugCommand;
+ if (!hasDebugCommandBreakpointReason &&
+ source == DebugCommandBreakpointSource) {
+ hasDebugCommandBreakpointReason = true;
+ hitReasons.push_back(std::make_pair(
+ protocol::Debugger::Paused::ReasonEnum::DebugCommand, nullptr));
+ }
}
}
+ for (size_t i = 0; i < m_breakReason.size(); ++i) {
+ hitReasons.push_back(std::move(m_breakReason[i]));
+ }
+ clearBreakDetails();
+
+ String16 breakReason = protocol::Debugger::Paused::ReasonEnum::Other;
+ std::unique_ptr<protocol::DictionaryValue> breakAuxData;
+ if (hitReasons.size() == 1) {
+ breakReason = hitReasons[0].first;
+ breakAuxData = std::move(hitReasons[0].second);
+ } else if (hitReasons.size() > 1) {
+ breakReason = protocol::Debugger::Paused::ReasonEnum::Ambiguous;
+ std::unique_ptr<protocol::ListValue> reasons =
+ protocol::ListValue::create();
+ for (size_t i = 0; i < hitReasons.size(); ++i) {
+ std::unique_ptr<protocol::DictionaryValue> reason =
+ protocol::DictionaryValue::create();
+ reason->setString("reason", hitReasons[i].first);
+ if (hitReasons[i].second)
+ reason->setObject("auxData", std::move(hitReasons[i].second));
+ reasons->pushValue(std::move(reason));
+ }
+ breakAuxData = protocol::DictionaryValue::create();
+ breakAuxData->setArray("reasons", std::move(reasons));
+ }
+
std::unique_ptr<Array<CallFrame>> protocolCallFrames;
Response response = currentCallFrames(&protocolCallFrames);
if (!response.isSuccess()) protocolCallFrames = Array<CallFrame>::create();
- m_frontend.paused(std::move(protocolCallFrames), m_breakReason,
- std::move(m_breakAuxData), std::move(hitBreakpointIds),
+ m_frontend.paused(std::move(protocolCallFrames), breakReason,
+ std::move(breakAuxData), std::move(hitBreakpointIds),
currentAsyncStackTrace());
m_scheduledDebuggerStep = NoStep;
m_javaScriptPauseScheduled = false;
- m_steppingFromFramework = false;
- m_pausingOnNativeEvent = false;
- m_skippedStepFrameCount = 0;
- m_recursionLevelForStepFrame = 0;
if (!m_continueToLocationBreakpointId.isEmpty()) {
m_debugger->removeBreakpoint(m_continueToLocationBreakpointId);
m_continueToLocationBreakpointId = "";
}
- return result;
}
void V8DebuggerAgentImpl::didContinue() {
- m_pausedContext.Reset();
JavaScriptCallFrames emptyCallFrames;
m_pausedCallFrames.swap(emptyCallFrames);
clearBreakDetails();
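With m_breakReason now a vector, didPause folds the accumulated reasons into one protocol value: a single reason passes through unchanged, while several collapse into "ambiguous" with the individual entries preserved under auxData.reasons. A sketch of that folding, with JSON strings standing in for protocol::DictionaryValue:

    #include <iostream>
    #include <string>
    #include <vector>

    struct Folded {
      std::string reason;
      std::string auxDataJson;  // stands in for protocol::DictionaryValue
    };

    Folded foldBreakReasons(const std::vector<std::string>& hitReasons) {
      if (hitReasons.empty()) return {"other", ""};
      if (hitReasons.size() == 1) return {hitReasons[0], ""};
      // Several reasons: report "ambiguous" and keep each one in auxData.
      std::string aux = "{\"reasons\":[";
      for (size_t i = 0; i < hitReasons.size(); ++i) {
        if (i) aux += ",";
        aux += "{\"reason\":\"" + hitReasons[i] + "\"}";
      }
      aux += "]}";
      return {"ambiguous", aux};
    }

    int main() {
      Folded f = foldBreakReasons({"OOM", "debugCommand"});
      std::cout << f.reason << " " << f.auxDataJson << "\n";
      // ambiguous {"reasons":[{"reason":"OOM"},{"reason":"debugCommand"}]}
    }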
@@ -1223,16 +1183,14 @@ void V8DebuggerAgentImpl::didContinue() {
void V8DebuggerAgentImpl::breakProgram(
const String16& breakReason,
std::unique_ptr<protocol::DictionaryValue> data) {
- if (!enabled() || m_skipAllPauses || !m_pausedContext.IsEmpty() ||
- isCurrentCallStackEmptyOrBlackboxed() ||
- !m_debugger->breakpointsActivated())
- return;
- m_breakReason = breakReason;
- m_breakAuxData = std::move(data);
+ if (!enabled() || !m_debugger->canBreakProgram() || m_skipAllPauses) return;
+ std::vector<BreakReason> currentScheduledReason;
+ currentScheduledReason.swap(m_breakReason);
+ pushBreakDetails(breakReason, std::move(data));
m_scheduledDebuggerStep = NoStep;
- m_steppingFromFramework = false;
- m_pausingOnNativeEvent = false;
m_debugger->breakProgram();
+ popBreakDetails();
+ m_breakReason.swap(currentScheduledReason);
}
void V8DebuggerAgentImpl::breakProgramOnException(
@@ -1244,11 +1202,6 @@ void V8DebuggerAgentImpl::breakProgramOnException(
breakProgram(breakReason, std::move(data));
}
-void V8DebuggerAgentImpl::clearBreakDetails() {
- m_breakReason = protocol::Debugger::Paused::ReasonEnum::Other;
- m_breakAuxData = nullptr;
-}
-
void V8DebuggerAgentImpl::setBreakpointAt(const String16& scriptId,
int lineNumber, int columnNumber,
BreakpointSource source,
@@ -1269,8 +1222,9 @@ void V8DebuggerAgentImpl::removeBreakpointAt(const String16& scriptId,
void V8DebuggerAgentImpl::reset() {
if (!enabled()) return;
m_scheduledDebuggerStep = NoStep;
- m_scripts.clear();
m_blackboxedPositions.clear();
+ resetBlackboxedStateCache();
+ m_scripts.clear();
m_breakpointIdToDebuggerBreakpointIds.clear();
}
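The scalar m_breakReason/m_breakAuxData pair became a stack so that schedulePauseOnNextStatement and cancelPauseOnNextStatement can nest: the underlying pause flag is toggled only on the empty/non-empty transitions, so an inner cancel no longer discards an outer instrumentation break. A condensed model of that behavior; BreakScheduler is an illustrative name:

    #include <string>
    #include <vector>

    class BreakScheduler {
     public:
      void schedulePause(const std::string& reason) {
        // Only the first scheduled reason arms the debugger.
        if (reasons_.empty()) pauseOnNextStatement_ = true;
        reasons_.push_back(reason);
      }
      void cancelPause() {
        if (!reasons_.empty()) reasons_.pop_back();
        // Only the last cancel disarms it.
        if (reasons_.empty()) pauseOnNextStatement_ = false;
      }
      bool pauseScheduled() const { return pauseOnNextStatement_; }

     private:
      std::vector<std::string> reasons_;
      bool pauseOnNextStatement_ = false;
    };

    int main() {
      BreakScheduler s;
      s.schedulePause("DOM");            // outer instrumentation point
      s.schedulePause("EventListener");  // nested one
      s.cancelPause();                   // inner cancel...
      return s.pauseScheduled() ? 0 : 1; // ...outer request survives
    }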
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 4e8e336545..41a18a8d36 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -8,6 +8,7 @@
#include <vector>
#include "src/base/macros.h"
+#include "src/debug/interface-types.h"
#include "src/inspector/java-script-call-frame.h"
#include "src/inspector/protocol/Debugger.h"
#include "src/inspector/protocol/Forward.h"
@@ -29,14 +30,6 @@ using protocol::Response;
class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
public:
- enum SkipPauseRequest {
- RequestNoSkip,
- RequestContinue,
- RequestStepInto,
- RequestStepOut,
- RequestStepFrame
- };
-
enum BreakpointSource {
UserBreakpointSource,
DebugCommandBreakpointSource,
@@ -100,7 +93,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
const String16& callFrameId, const String16& expression,
Maybe<String16> objectGroup, Maybe<bool> includeCommandLineAPI,
Maybe<bool> silent, Maybe<bool> returnByValue,
- Maybe<bool> generatePreview,
+ Maybe<bool> generatePreview, Maybe<bool> throwOnSideEffect,
std::unique_ptr<protocol::Runtime::RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>*) override;
Response setVariableValue(
@@ -134,23 +127,25 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void reset();
// Interface for V8InspectorImpl
- SkipPauseRequest didPause(v8::Local<v8::Context>,
- v8::Local<v8::Value> exception,
- const std::vector<String16>& hitBreakpoints,
- bool isPromiseRejection, bool isUncaught);
+ void didPause(int contextId, v8::Local<v8::Value> exception,
+ const std::vector<String16>& hitBreakpoints,
+ bool isPromiseRejection, bool isUncaught, bool isOOMBreak);
void didContinue();
void didParseSource(std::unique_ptr<V8DebuggerScript>, bool success);
void willExecuteScript(int scriptId);
void didExecuteScript();
+ bool isFunctionBlackboxed(const String16& scriptId,
+ const v8::debug::Location& start,
+ const v8::debug::Location& end);
+
+ bool skipAllPauses() const { return m_skipAllPauses; }
+
v8::Isolate* isolate() { return m_isolate; }
private:
void enableImpl();
- SkipPauseRequest shouldSkipExceptionPause(JavaScriptCallFrame* topCallFrame);
- SkipPauseRequest shouldSkipStepPause(JavaScriptCallFrame* topCallFrame);
-
void schedulePauseOnNextStatementIfSteppingInto();
Response currentCallFrames(
@@ -166,14 +161,13 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void removeBreakpointImpl(const String16& breakpointId);
void clearBreakDetails();
- bool isCurrentCallStackEmptyOrBlackboxed();
- bool isTopPausedCallFrameBlackboxed();
- bool isCallFrameWithUnknownScriptOrBlackboxed(JavaScriptCallFrame*);
-
void internalSetAsyncCallStackDepth(int);
void increaseCachedSkipStackGeneration();
Response setBlackboxPattern(const String16& pattern);
+ void resetBlackboxedStateCache();
+
+ bool isPaused() const;
using ScriptsMap =
protocol::HashMap<String16, std::unique_ptr<V8DebuggerScript>>;
@@ -192,24 +186,26 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
protocol::DictionaryValue* m_state;
protocol::Debugger::Frontend m_frontend;
v8::Isolate* m_isolate;
- v8::Global<v8::Context> m_pausedContext;
JavaScriptCallFrames m_pausedCallFrames;
ScriptsMap m_scripts;
BreakpointIdToDebuggerBreakpointIdsMap m_breakpointIdToDebuggerBreakpointIds;
DebugServerBreakpointToBreakpointIdAndSourceMap m_serverBreakpoints;
String16 m_continueToLocationBreakpointId;
- String16 m_breakReason;
- std::unique_ptr<protocol::DictionaryValue> m_breakAuxData;
+
+ using BreakReason =
+ std::pair<String16, std::unique_ptr<protocol::DictionaryValue>>;
+ std::vector<BreakReason> m_breakReason;
+
+ void pushBreakDetails(
+ const String16& breakReason,
+ std::unique_ptr<protocol::DictionaryValue> breakAuxData);
+ void popBreakDetails();
+
DebuggerStep m_scheduledDebuggerStep;
- bool m_skipNextDebuggerStepOut;
bool m_javaScriptPauseScheduled;
- bool m_steppingFromFramework;
- bool m_pausingOnNativeEvent;
- int m_skippedStepFrameCount;
int m_recursionLevelForStepOut;
- int m_recursionLevelForStepFrame;
- bool m_skipAllPauses;
+ bool m_skipAllPauses = false;
std::unique_ptr<V8Regex> m_blackboxPattern;
protocol::HashMap<String16, std::vector<std::pair<int, int>>>
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index d6d15e5ae6..200cdc71a2 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -6,6 +6,7 @@
#include "src/inspector/inspected-context.h"
#include "src/inspector/string-util.h"
+#include "src/inspector/wasm-translation.h"
namespace v8_inspector {
@@ -69,6 +70,32 @@ String16 calculateHash(const String16& str) {
return hash.toString();
}
+void TranslateProtocolLocationToV8Location(WasmTranslation* wasmTranslation,
+ v8::debug::Location* loc,
+ const String16& scriptId,
+ const String16& expectedV8ScriptId) {
+ if (loc->IsEmpty()) return;
+ int lineNumber = loc->GetLineNumber();
+ int columnNumber = loc->GetColumnNumber();
+ String16 translatedScriptId = scriptId;
+ wasmTranslation->TranslateProtocolLocationToWasmScriptLocation(
+ &translatedScriptId, &lineNumber, &columnNumber);
+ DCHECK_EQ(expectedV8ScriptId.utf8(), translatedScriptId.utf8());
+ *loc = v8::debug::Location(lineNumber, columnNumber);
+}
+
+void TranslateV8LocationToProtocolLocation(
+ WasmTranslation* wasmTranslation, v8::debug::Location* loc,
+ const String16& scriptId, const String16& expectedProtocolScriptId) {
+ int lineNumber = loc->GetLineNumber();
+ int columnNumber = loc->GetColumnNumber();
+ String16 translatedScriptId = scriptId;
+ wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
+ &translatedScriptId, &lineNumber, &columnNumber);
+ DCHECK_EQ(expectedProtocolScriptId.utf8(), translatedScriptId.utf8());
+ *loc = v8::debug::Location(lineNumber, columnNumber);
+}
+
class ActualScript : public V8DebuggerScript {
friend class V8DebuggerScript;
@@ -115,10 +142,13 @@ class ActualScript : public V8DebuggerScript {
}
}
+ m_isModule = script->IsModule();
+
m_script.Reset(m_isolate, script);
}
bool isLiveEdit() const override { return m_isLiveEdit; }
+ bool isModule() const override { return m_isModule; }
const String16& sourceMappingURL() const override {
return m_sourceMappingURL;
@@ -148,6 +178,11 @@ class ActualScript : public V8DebuggerScript {
return script->GetPossibleBreakpoints(start, end, locations);
}
+ void resetBlackboxedStateCache() override {
+ v8::HandleScope scope(m_isolate);
+ v8::debug::ResetBlackboxedStateCache(m_isolate, m_script.Get(m_isolate));
+ }
+
private:
String16 GetNameOrSourceUrl(v8::Local<v8::debug::Script> script) {
v8::Local<v8::String> name;
@@ -159,6 +194,7 @@ class ActualScript : public V8DebuggerScript {
String16 m_sourceMappingURL;
v8::Global<v8::String> m_sourceObj;
bool m_isLiveEdit = false;
+ bool m_isModule = false;
v8::Global<v8::debug::Script> m_script;
};
@@ -166,11 +202,12 @@ class WasmVirtualScript : public V8DebuggerScript {
friend class V8DebuggerScript;
public:
- WasmVirtualScript(v8::Isolate* isolate,
+ WasmVirtualScript(v8::Isolate* isolate, WasmTranslation* wasmTranslation,
v8::Local<v8::debug::WasmScript> script, String16 id,
String16 url, String16 source)
: V8DebuggerScript(isolate, std::move(id), std::move(url)),
- m_script(isolate, script) {
+ m_script(isolate, script),
+ m_wasmTranslation(wasmTranslation) {
int num_lines = 0;
int last_newline = -1;
size_t next_newline = source.find('\n', last_newline + 1);
@@ -186,17 +223,41 @@ class WasmVirtualScript : public V8DebuggerScript {
const String16& sourceMappingURL() const override { return emptyString(); }
bool isLiveEdit() const override { return false; }
+ bool isModule() const override { return false; }
void setSourceMappingURL(const String16&) override {}
bool getPossibleBreakpoints(
const v8::debug::Location& start, const v8::debug::Location& end,
std::vector<v8::debug::Location>* locations) override {
- // TODO(clemensh): Returning false produces the protocol error "Internal
- // error". Implement and fix expected output of
- // wasm-get-breakable-locations.js.
- return false;
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::debug::Script> script = m_script.Get(m_isolate);
+ String16 v8ScriptId = String16::fromInteger(script->Id());
+
+ v8::debug::Location translatedStart = start;
+ TranslateProtocolLocationToV8Location(m_wasmTranslation, &translatedStart,
+ scriptId(), v8ScriptId);
+
+ v8::debug::Location translatedEnd = end;
+ if (translatedEnd.IsEmpty()) {
+ // Stop before the start of the next function.
+ translatedEnd =
+ v8::debug::Location(translatedStart.GetLineNumber() + 1, 0);
+ } else {
+ TranslateProtocolLocationToV8Location(m_wasmTranslation, &translatedEnd,
+ scriptId(), v8ScriptId);
+ }
+
+ bool success = script->GetPossibleBreakpoints(translatedStart,
+ translatedEnd, locations);
+ for (v8::debug::Location& loc : *locations) {
+ TranslateV8LocationToProtocolLocation(m_wasmTranslation, &loc, v8ScriptId,
+ scriptId());
+ }
+ return success;
}
+ void resetBlackboxedStateCache() override {}
+
private:
static const String16& emptyString() {
static const String16 singleEmptyString;
@@ -204,6 +265,7 @@ class WasmVirtualScript : public V8DebuggerScript {
}
v8::Global<v8::debug::WasmScript> m_script;
+ WasmTranslation* m_wasmTranslation;
};
} // namespace
@@ -216,11 +278,12 @@ std::unique_ptr<V8DebuggerScript> V8DebuggerScript::Create(
}
std::unique_ptr<V8DebuggerScript> V8DebuggerScript::CreateWasm(
- v8::Isolate* isolate, v8::Local<v8::debug::WasmScript> underlyingScript,
- String16 id, String16 url, String16 source) {
+ v8::Isolate* isolate, WasmTranslation* wasmTranslation,
+ v8::Local<v8::debug::WasmScript> underlyingScript, String16 id,
+ String16 url, String16 source) {
return std::unique_ptr<WasmVirtualScript>(
- new WasmVirtualScript(isolate, underlyingScript, std::move(id),
- std::move(url), std::move(source)));
+ new WasmVirtualScript(isolate, wasmTranslation, underlyingScript,
+ std::move(id), std::move(url), std::move(source)));
}
V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate, String16 id,
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index 58beefe5ec..9250c9d872 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -39,14 +39,18 @@
namespace v8_inspector {
+// Forward declaration.
+class WasmTranslation;
+
class V8DebuggerScript {
public:
static std::unique_ptr<V8DebuggerScript> Create(
v8::Isolate* isolate, v8::Local<v8::debug::Script> script,
bool isLiveEdit);
static std::unique_ptr<V8DebuggerScript> CreateWasm(
- v8::Isolate* isolate, v8::Local<v8::debug::WasmScript> underlyingScript,
- String16 id, String16 url, String16 source);
+ v8::Isolate* isolate, WasmTranslation* wasmTranslation,
+ v8::Local<v8::debug::WasmScript> underlyingScript, String16 id,
+ String16 url, String16 source);
virtual ~V8DebuggerScript();
@@ -63,6 +67,7 @@ class V8DebuggerScript {
int endColumn() const { return m_endColumn; }
int executionContextId() const { return m_executionContextId; }
virtual bool isLiveEdit() const = 0;
+ virtual bool isModule() const = 0;
void setSourceURL(const String16&);
virtual void setSourceMappingURL(const String16&) = 0;
@@ -73,6 +78,7 @@ class V8DebuggerScript {
virtual bool getPossibleBreakpoints(
const v8::debug::Location& start, const v8::debug::Location& end,
std::vector<v8::debug::Location>* locations) = 0;
+ virtual void resetBlackboxedStateCache() = 0;
protected:
V8DebuggerScript(v8::Isolate*, String16 id, String16 url);
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 2563f4f36c..3a2fc89f00 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -30,6 +30,112 @@ inline v8::Local<v8::Boolean> v8Boolean(bool value, v8::Isolate* isolate) {
return value ? v8::True(isolate) : v8::False(isolate);
}
+V8DebuggerAgentImpl* agentForScript(V8InspectorImpl* inspector,
+ v8::Local<v8::debug::Script> script) {
+ v8::Local<v8::Value> contextData;
+ if (!script->ContextData().ToLocal(&contextData) || !contextData->IsInt32()) {
+ return nullptr;
+ }
+ int contextId = static_cast<int>(contextData.As<v8::Int32>()->Value());
+ int contextGroupId = inspector->contextGroupId(contextId);
+ if (!contextGroupId) return nullptr;
+ return inspector->enabledDebuggerAgentForGroup(contextGroupId);
+}
+
+v8::MaybeLocal<v8::Array> collectionsEntries(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<v8::Array> entries;
+ bool isKeyValue = false;
+ if (!v8::debug::EntriesPreview(isolate, value, &isKeyValue).ToLocal(&entries))
+ return v8::MaybeLocal<v8::Array>();
+
+ v8::Local<v8::Array> wrappedEntries = v8::Array::New(isolate);
+ CHECK(!isKeyValue || entries->Length() % 2 == 0);
+ if (!wrappedEntries->SetPrototype(context, v8::Null(isolate))
+ .FromMaybe(false))
+ return v8::MaybeLocal<v8::Array>();
+ for (uint32_t i = 0; i < entries->Length(); i += isKeyValue ? 2 : 1) {
+ v8::Local<v8::Value> item;
+ if (!entries->Get(context, i).ToLocal(&item)) continue;
+ v8::Local<v8::Value> value;
+ if (isKeyValue && !entries->Get(context, i + 1).ToLocal(&value)) continue;
+ v8::Local<v8::Object> wrapper = v8::Object::New(isolate);
+ if (!wrapper->SetPrototype(context, v8::Null(isolate)).FromMaybe(false))
+ continue;
+ createDataProperty(
+ context, wrapper,
+ toV8StringInternalized(isolate, isKeyValue ? "key" : "value"), item);
+ if (isKeyValue) {
+ createDataProperty(context, wrapper,
+ toV8StringInternalized(isolate, "value"), value);
+ }
+ createDataProperty(context, wrappedEntries, wrappedEntries->Length(),
+ wrapper);
+ }
+ if (!markArrayEntriesAsInternal(context, wrappedEntries,
+ V8InternalValueType::kEntry)) {
+ return v8::MaybeLocal<v8::Array>();
+ }
+ return wrappedEntries;
+}
+
+v8::MaybeLocal<v8::Object> buildLocation(v8::Local<v8::Context> context,
+ int scriptId, int lineNumber,
+ int columnNumber) {
+ if (scriptId == v8::UnboundScript::kNoScriptId)
+ return v8::MaybeLocal<v8::Object>();
+ if (lineNumber == v8::Function::kLineOffsetNotFound ||
+ columnNumber == v8::Function::kLineOffsetNotFound) {
+ return v8::MaybeLocal<v8::Object>();
+ }
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<v8::Object> location = v8::Object::New(isolate);
+ if (!location->SetPrototype(context, v8::Null(isolate)).FromMaybe(false)) {
+ return v8::MaybeLocal<v8::Object>();
+ }
+ if (!createDataProperty(context, location,
+ toV8StringInternalized(isolate, "scriptId"),
+ toV8String(isolate, String16::fromInteger(scriptId)))
+ .FromMaybe(false)) {
+ return v8::MaybeLocal<v8::Object>();
+ }
+ if (!createDataProperty(context, location,
+ toV8StringInternalized(isolate, "lineNumber"),
+ v8::Integer::New(isolate, lineNumber))
+ .FromMaybe(false)) {
+ return v8::MaybeLocal<v8::Object>();
+ }
+ if (!createDataProperty(context, location,
+ toV8StringInternalized(isolate, "columnNumber"),
+ v8::Integer::New(isolate, columnNumber))
+ .FromMaybe(false)) {
+ return v8::MaybeLocal<v8::Object>();
+ }
+ if (!markAsInternal(context, location, V8InternalValueType::kLocation)) {
+ return v8::MaybeLocal<v8::Object>();
+ }
+ return location;
+}
+
+v8::MaybeLocal<v8::Object> generatorObjectLocation(
+ v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
+ if (!value->IsGeneratorObject()) return v8::MaybeLocal<v8::Object>();
+ v8::Local<v8::debug::GeneratorObject> generatorObject =
+ v8::debug::GeneratorObject::Cast(value);
+ if (!generatorObject->IsSuspended()) {
+ v8::Local<v8::Function> func = generatorObject->Function();
+ return buildLocation(context, func->ScriptId(), func->GetScriptLineNumber(),
+ func->GetScriptColumnNumber());
+ }
+ v8::Local<v8::debug::Script> script;
+ if (!generatorObject->Script().ToLocal(&script))
+ return v8::MaybeLocal<v8::Object>();
+ v8::debug::Location suspendedLocation = generatorObject->SuspendedLocation();
+ return buildLocation(context, script->Id(), suspendedLocation.GetLineNumber(),
+ suspendedLocation.GetColumnNumber());
+}
+
} // namespace
static bool inLiveEditScope = false;
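collectionsEntries() above rewraps the output of v8::debug::EntriesPreview, which for key/value collections arrives as a flat array [k0, v0, k1, v1, ...], into one record per entry. The same reshaping over plain strings; std::pair stands in for the prototype-less v8::Object wrappers built in the real code:

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    // Rewraps a flat preview array into per-entry records. For key/value
    // collections the input alternates keys and values; otherwise every
    // element is a plain value.
    std::vector<std::pair<std::string, std::string>> wrapEntries(
        const std::vector<std::string>& flat, bool isKeyValue) {
      std::vector<std::pair<std::string, std::string>> wrapped;
      for (size_t i = 0; i < flat.size(); i += isKeyValue ? 2 : 1) {
        if (isKeyValue)
          wrapped.emplace_back(flat[i], flat[i + 1]);
        else
          wrapped.emplace_back("value", flat[i]);
      }
      return wrapped;
    }

    int main() {
      for (const auto& e : wrapEntries({"a", "1", "b", "2"}, true))
        std::cout << e.first << " -> " << e.second << "\n";  // a -> 1, b -> 2
    }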
@@ -72,10 +178,9 @@ void V8Debugger::enable() {
if (m_enableCount++) return;
DCHECK(!enabled());
v8::HandleScope scope(m_isolate);
- v8::debug::SetDebugEventListener(m_isolate, &V8Debugger::v8DebugEventCallback,
- v8::External::New(m_isolate, this));
- v8::debug::SetAsyncTaskListener(m_isolate, &V8Debugger::v8AsyncTaskListener,
- this);
+ v8::debug::SetDebugDelegate(m_isolate, this);
+ v8::debug::SetOutOfMemoryCallback(m_isolate, &V8Debugger::v8OOMCallback,
+ this);
m_debuggerContext.Reset(m_isolate, v8::debug::GetDebugContext(m_isolate));
v8::debug::ChangeBreakOnException(m_isolate, v8::debug::NoBreakOnException);
m_pauseOnExceptionsState = v8::debug::NoBreakOnException;
@@ -90,8 +195,9 @@ void V8Debugger::disable() {
m_debuggerContext.Reset();
allAsyncTasksCanceled();
m_wasmTranslation.Clear();
- v8::debug::SetDebugEventListener(m_isolate, nullptr);
- v8::debug::SetAsyncTaskListener(m_isolate, nullptr, nullptr);
+ v8::debug::SetDebugDelegate(m_isolate, nullptr);
+ v8::debug::SetOutOfMemoryCallback(m_isolate, nullptr, nullptr);
+ m_isolate->RestoreOriginalHeapLimit();
}
bool V8Debugger::enabled() const { return !m_debuggerScript.IsEmpty(); }
@@ -140,6 +246,7 @@ String16 V8Debugger::setBreakpoint(const ScriptBreakpoint& breakpoint,
toV8String(m_isolate, breakpoint.condition))
.FromMaybe(false);
DCHECK(success);
+ USE(success);
v8::Local<v8::Function> setBreakpointFunction = v8::Local<v8::Function>::Cast(
m_debuggerScript.Get(m_isolate)
@@ -174,6 +281,7 @@ void V8Debugger::removeBreakpoint(const String16& breakpointId) {
toV8String(m_isolate, breakpointId))
.FromMaybe(false);
DCHECK(success);
+ USE(success);
v8::Local<v8::Function> removeBreakpointFunction =
v8::Local<v8::Function>::Cast(
@@ -202,25 +310,7 @@ void V8Debugger::setBreakpointsActivated(bool activated) {
UNREACHABLE();
return;
}
- v8::HandleScope scope(m_isolate);
- v8::Local<v8::Context> context = debuggerContext();
- v8::Context::Scope contextScope(context);
-
- v8::Local<v8::Object> info = v8::Object::New(m_isolate);
- bool success = false;
- success = info->Set(context, toV8StringInternalized(m_isolate, "enabled"),
- v8::Boolean::New(m_isolate, activated))
- .FromMaybe(false);
- DCHECK(success);
- v8::Local<v8::Function> setBreakpointsActivated =
- v8::Local<v8::Function>::Cast(
- m_debuggerScript.Get(m_isolate)
- ->Get(context, toV8StringInternalized(m_isolate,
- "setBreakpointsActivated"))
- .ToLocalChecked());
- v8::debug::Call(debuggerContext(), setBreakpointsActivated, info)
- .ToLocalChecked();
-
+ v8::debug::SetBreakPointsActive(m_isolate, activated);
m_breakpointsActivated = activated;
}
@@ -238,7 +328,7 @@ void V8Debugger::setPauseOnExceptionsState(
}
void V8Debugger::setPauseOnNextStatement(bool pause) {
- if (m_runningNestedMessageLoop) return;
+ if (isPaused()) return;
if (pause)
v8::debug::DebugBreak(m_isolate);
else
@@ -247,19 +337,12 @@ void V8Debugger::setPauseOnNextStatement(bool pause) {
bool V8Debugger::canBreakProgram() {
if (!m_breakpointsActivated) return false;
- return m_isolate->InContext();
+ return v8::debug::HasNonBlackboxedFrameOnStack(m_isolate);
}
void V8Debugger::breakProgram() {
- if (isPaused()) {
- DCHECK(!m_runningNestedMessageLoop);
- v8::Local<v8::Value> exception;
- v8::Local<v8::Array> hitBreakpoints;
- handleProgramBreak(m_pausedContext, m_executionState, exception,
- hitBreakpoints);
- return;
- }
-
+ // Don't allow nested breaks.
+ if (isPaused()) return;
if (!canBreakProgram()) return;
v8::HandleScope scope(m_isolate);
@@ -300,11 +383,6 @@ void V8Debugger::stepOutOfFunction() {
continueProgram();
}
-void V8Debugger::clearStepping() {
- DCHECK(enabled());
- v8::debug::ClearStepping(m_isolate);
-}
-
Response V8Debugger::setScriptSource(
const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails,
@@ -403,27 +481,14 @@ Response V8Debugger::setScriptSource(
}
JavaScriptCallFrames V8Debugger::currentCallFrames(int limit) {
- if (!m_isolate->InContext()) return JavaScriptCallFrames();
+ if (!isPaused()) return JavaScriptCallFrames();
v8::Local<v8::Value> currentCallFramesV8;
- if (m_executionState.IsEmpty()) {
- v8::Local<v8::Function> currentCallFramesFunction =
- v8::Local<v8::Function>::Cast(
- m_debuggerScript.Get(m_isolate)
- ->Get(debuggerContext(),
- toV8StringInternalized(m_isolate, "currentCallFrames"))
- .ToLocalChecked());
- if (!v8::debug::Call(debuggerContext(), currentCallFramesFunction,
- v8::Integer::New(m_isolate, limit))
- .ToLocal(&currentCallFramesV8))
- return JavaScriptCallFrames();
- } else {
- v8::Local<v8::Value> argv[] = {m_executionState,
- v8::Integer::New(m_isolate, limit)};
- if (!callDebuggerMethod("currentCallFrames", arraysize(argv), argv, true)
- .ToLocal(&currentCallFramesV8))
- return JavaScriptCallFrames();
+ v8::Local<v8::Value> argv[] = {m_executionState,
+ v8::Integer::New(m_isolate, limit)};
+ if (!callDebuggerMethod("currentCallFrames", arraysize(argv), argv, true)
+ .ToLocal(&currentCallFramesV8)) {
+ return JavaScriptCallFrames();
}
- DCHECK(!currentCallFramesV8.IsEmpty());
if (!currentCallFramesV8->IsArray()) return JavaScriptCallFrames();
v8::Local<v8::Array> callFramesArray = currentCallFramesV8.As<v8::Array>();
JavaScriptCallFrames callFrames;
@@ -464,11 +529,11 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
v8::Local<v8::Array> hitBreakpointNumbers,
bool isPromiseRejection, bool isUncaught) {
// Don't allow nested breaks.
- if (m_runningNestedMessageLoop) return;
+ if (isPaused()) return;
V8DebuggerAgentImpl* agent = m_inspector->enabledDebuggerAgentForGroup(
m_inspector->contextGroupId(pausedContext));
- if (!agent) return;
+ if (!agent || (agent->skipAllPauses() && !m_scheduledOOMBreak)) return;
std::vector<String16> breakpointIds;
if (!hitBreakpointNumbers.IsEmpty()) {
@@ -484,148 +549,111 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
m_pausedContext = pausedContext;
m_executionState = executionState;
- V8DebuggerAgentImpl::SkipPauseRequest result = agent->didPause(
- pausedContext, exception, breakpointIds, isPromiseRejection, isUncaught);
- if (result == V8DebuggerAgentImpl::RequestNoSkip) {
- m_runningNestedMessageLoop = true;
- int groupId = m_inspector->contextGroupId(pausedContext);
- DCHECK(groupId);
+ m_runningNestedMessageLoop = true;
+ agent->didPause(InspectedContext::contextId(pausedContext), exception,
+ breakpointIds, isPromiseRejection, isUncaught,
+ m_scheduledOOMBreak);
+ int groupId = m_inspector->contextGroupId(pausedContext);
+ DCHECK(groupId);
+ {
v8::Context::Scope scope(pausedContext);
v8::Local<v8::Context> context = m_isolate->GetCurrentContext();
CHECK(!context.IsEmpty() &&
context != v8::debug::GetDebugContext(m_isolate));
m_inspector->client()->runMessageLoopOnPause(groupId);
- // The agent may have been removed in the nested loop.
- agent = m_inspector->enabledDebuggerAgentForGroup(
- m_inspector->contextGroupId(pausedContext));
- if (agent) agent->didContinue();
m_runningNestedMessageLoop = false;
}
+ // The agent may have been removed in the nested loop.
+ agent = m_inspector->enabledDebuggerAgentForGroup(groupId);
+ if (agent) agent->didContinue();
+ if (m_scheduledOOMBreak) m_isolate->RestoreOriginalHeapLimit();
+ m_scheduledOOMBreak = false;
m_pausedContext.Clear();
m_executionState.Clear();
-
- if (result == V8DebuggerAgentImpl::RequestStepFrame) {
- v8::debug::PrepareStep(m_isolate, v8::debug::StepFrame);
- } else if (result == V8DebuggerAgentImpl::RequestStepInto) {
- v8::debug::PrepareStep(m_isolate, v8::debug::StepIn);
- } else if (result == V8DebuggerAgentImpl::RequestStepOut) {
- v8::debug::PrepareStep(m_isolate, v8::debug::StepOut);
- }
}
-void V8Debugger::v8DebugEventCallback(
- const v8::debug::EventDetails& eventDetails) {
- V8Debugger* thisPtr = toV8Debugger(eventDetails.GetCallbackData());
- thisPtr->handleV8DebugEvent(eventDetails);
+void V8Debugger::v8OOMCallback(void* data) {
+ V8Debugger* thisPtr = static_cast<V8Debugger*>(data);
+ thisPtr->m_isolate->IncreaseHeapLimitForDebugging();
+ thisPtr->m_scheduledOOMBreak = true;
+ thisPtr->setPauseOnNextStatement(true);
}
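
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the OOM-break protocol that
// v8OOMCallback and handleProgramBreak implement between them. FakeIsolate
// and OOMBreakScheduler are hypothetical stand-ins for the V8 internals; the
// point is the ordering — raise the heap limit so the paused program can be
// inspected, schedule a pause, and restore the limit once the pause ends.
struct FakeIsolate {
  size_t heap_limit = 64 << 20;
  size_t original_limit = 64 << 20;
  void IncreaseHeapLimitForDebugging() { heap_limit *= 4; }
  void RestoreOriginalHeapLimit() { heap_limit = original_limit; }
};

struct OOMBreakScheduler {
  FakeIsolate* isolate;
  bool scheduled_oom_break = false;

  void OnNearHeapLimit() {                // analogous to v8OOMCallback
    isolate->IncreaseHeapLimitForDebugging();
    scheduled_oom_break = true;           // break on the next statement
  }

  void OnPauseFinished() {                // analogous to handleProgramBreak's tail
    if (scheduled_oom_break) isolate->RestoreOriginalHeapLimit();
    scheduled_oom_break = false;
  }
};
// ---------------------------------------------------------------------------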
-v8::Local<v8::Value> V8Debugger::callInternalGetterFunction(
- v8::Local<v8::Object> object, const char* functionName) {
- v8::MicrotasksScope microtasks(m_isolate,
- v8::MicrotasksScope::kDoNotRunMicrotasks);
- v8::Local<v8::Value> getterValue =
- object
- ->Get(m_isolate->GetCurrentContext(),
- toV8StringInternalized(m_isolate, functionName))
- .ToLocalChecked();
- DCHECK(!getterValue.IsEmpty() && getterValue->IsFunction());
- return v8::Local<v8::Function>::Cast(getterValue)
- ->Call(m_isolate->GetCurrentContext(), object, 0, nullptr)
- .ToLocalChecked();
+void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
+ bool has_compile_error) {
+ V8DebuggerAgentImpl* agent = agentForScript(m_inspector, script);
+ if (!agent) return;
+ if (script->IsWasm()) {
+ m_wasmTranslation.AddScript(script.As<v8::debug::WasmScript>(), agent);
+ } else if (m_ignoreScriptParsedEventsCounter == 0) {
+ agent->didParseSource(
+ V8DebuggerScript::Create(m_isolate, script, inLiveEditScope),
+ !has_compile_error);
+ }
}
-void V8Debugger::handleV8DebugEvent(
- const v8::debug::EventDetails& eventDetails) {
- if (!enabled()) return;
- v8::HandleScope scope(m_isolate);
-
- v8::DebugEvent event = eventDetails.GetEvent();
- if (event != v8::Break && event != v8::Exception &&
- event != v8::AfterCompile && event != v8::CompileError)
+void V8Debugger::BreakProgramRequested(v8::Local<v8::Context> pausedContext,
+ v8::Local<v8::Object> execState,
+ v8::Local<v8::Value> breakPointsHit) {
+ v8::Local<v8::Value> argv[] = {breakPointsHit};
+ v8::Local<v8::Value> hitBreakpoints;
+ if (!callDebuggerMethod("getBreakpointNumbers", 1, argv, true)
+ .ToLocal(&hitBreakpoints)) {
return;
+ }
+ DCHECK(hitBreakpoints->IsArray());
+ handleProgramBreak(pausedContext, execState, v8::Local<v8::Value>(),
+ hitBreakpoints.As<v8::Array>());
+}
- v8::Local<v8::Context> eventContext = eventDetails.GetEventContext();
- DCHECK(!eventContext.IsEmpty());
- V8DebuggerAgentImpl* agent = m_inspector->enabledDebuggerAgentForGroup(
- m_inspector->contextGroupId(eventContext));
- if (!agent) return;
+void V8Debugger::ExceptionThrown(v8::Local<v8::Context> pausedContext,
+ v8::Local<v8::Object> execState,
+ v8::Local<v8::Value> exception,
+ v8::Local<v8::Value> promise,
+ bool isUncaught) {
+ bool isPromiseRejection = promise->IsPromise();
+ handleProgramBreak(pausedContext, execState, exception,
+ v8::Local<v8::Array>(), isPromiseRejection, isUncaught);
+}
- if (event == v8::AfterCompile || event == v8::CompileError) {
- v8::Context::Scope contextScope(debuggerContext());
- // Determine if the script is a wasm script.
- v8::Local<v8::Value> scriptMirror =
- callInternalGetterFunction(eventDetails.GetEventData(), "script");
- DCHECK(scriptMirror->IsObject());
- v8::Local<v8::Value> scriptWrapper =
- callInternalGetterFunction(scriptMirror.As<v8::Object>(), "value");
- DCHECK(scriptWrapper->IsObject());
- v8::Local<v8::debug::Script> script;
- if (!v8::debug::Script::Wrap(m_isolate, scriptWrapper.As<v8::Object>())
- .ToLocal(&script)) {
- return;
- }
- if (script->IsWasm()) {
- m_wasmTranslation.AddScript(script.As<v8::debug::WasmScript>(), agent);
- } else if (m_ignoreScriptParsedEventsCounter == 0) {
- agent->didParseSource(
- V8DebuggerScript::Create(m_isolate, script, inLiveEditScope),
- event == v8::AfterCompile);
- }
- } else if (event == v8::Exception) {
- v8::Local<v8::Context> context = debuggerContext();
- v8::Local<v8::Object> eventData = eventDetails.GetEventData();
- v8::Local<v8::Value> exception =
- callInternalGetterFunction(eventData, "exception");
- v8::Local<v8::Value> promise =
- callInternalGetterFunction(eventData, "promise");
- bool isPromiseRejection = !promise.IsEmpty() && promise->IsObject();
- v8::Local<v8::Value> uncaught =
- callInternalGetterFunction(eventData, "uncaught");
- bool isUncaught = uncaught->BooleanValue(context).FromJust();
- handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
- exception, v8::Local<v8::Array>(), isPromiseRejection,
- isUncaught);
- } else if (event == v8::Break) {
- v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
- v8::Local<v8::Value> hitBreakpoints;
- if (!callDebuggerMethod("getBreakpointNumbers", 1, argv, true)
- .ToLocal(&hitBreakpoints))
- return;
- DCHECK(hitBreakpoints->IsArray());
- handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
- v8::Local<v8::Value>(), hitBreakpoints.As<v8::Array>());
- }
-}
-
-void V8Debugger::v8AsyncTaskListener(v8::debug::PromiseDebugActionType type,
- int id, void* data) {
- V8Debugger* debugger = static_cast<V8Debugger*>(data);
- if (!debugger->m_maxAsyncCallStackDepth) return;
+bool V8Debugger::IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
+ const v8::debug::Location& start,
+ const v8::debug::Location& end) {
+ V8DebuggerAgentImpl* agent = agentForScript(m_inspector, script);
+ if (!agent) return false;
+ return agent->isFunctionBlackboxed(String16::fromInteger(script->Id()), start,
+ end);
+}
+
+void V8Debugger::PromiseEventOccurred(v8::debug::PromiseDebugActionType type,
+ int id, int parentId) {
+ if (!m_maxAsyncCallStackDepth) return;
  // Async task events from Promises are given misaligned pointers to prevent
  // them from overlapping with other Blink task identifiers. There is a single
// namespace of such ids, managed by src/js/promise.js.
void* ptr = reinterpret_cast<void*>(id * 2 + 1);
switch (type) {
+ case v8::debug::kDebugPromiseCreated:
+ asyncTaskCreated(
+ ptr, parentId ? reinterpret_cast<void*>(parentId * 2 + 1) : nullptr);
+ break;
case v8::debug::kDebugEnqueueAsyncFunction:
- debugger->asyncTaskScheduled("async function", ptr, true);
+ asyncTaskScheduled("async function", ptr, true);
break;
case v8::debug::kDebugEnqueuePromiseResolve:
- debugger->asyncTaskScheduled("Promise.resolve", ptr, true);
+ asyncTaskScheduled("Promise.resolve", ptr, true);
break;
case v8::debug::kDebugEnqueuePromiseReject:
- debugger->asyncTaskScheduled("Promise.reject", ptr, true);
- break;
- case v8::debug::kDebugEnqueuePromiseResolveThenableJob:
- debugger->asyncTaskScheduled("PromiseResolveThenableJob", ptr, true);
+ asyncTaskScheduled("Promise.reject", ptr, true);
break;
case v8::debug::kDebugPromiseCollected:
- debugger->asyncTaskCanceled(ptr);
+ asyncTaskCanceled(ptr);
break;
case v8::debug::kDebugWillHandle:
- debugger->asyncTaskStarted(ptr);
+ asyncTaskStarted(ptr);
break;
case v8::debug::kDebugDidHandle:
- debugger->asyncTaskFinished(ptr);
+ asyncTaskFinished(ptr);
break;
}
}
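
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the id-to-pointer tagging used
// above. Promise ids become odd "pointers" (id * 2 + 1); genuine task
// pointers handed in by the embedder are at least 2-byte aligned and hence
// even, so the two id spaces can share one map without collisions.
#include <cstdint>

inline void* TagPromiseId(int id) {
  return reinterpret_cast<void*>(static_cast<intptr_t>(id) * 2 + 1);
}

inline int UntagPromiseId(void* ptr) {
  return static_cast<int>(reinterpret_cast<intptr_t>(ptr) >> 1);
}

inline bool IsTaggedPromiseId(void* ptr) {
  return (reinterpret_cast<intptr_t>(ptr) & 1) != 0;
}
// Usage: TagPromiseId(7) yields an odd value that round-trips through
// UntagPromiseId(), and IsTaggedPromiseId() tells it apart from any aligned
// embedder pointer.
// ---------------------------------------------------------------------------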
@@ -718,8 +746,11 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
return v8::MaybeLocal<v8::Array>();
if (value->IsFunction()) {
v8::Local<v8::Function> function = value.As<v8::Function>();
- v8::Local<v8::Value> location = functionLocation(context, function);
- if (location->IsObject()) {
+ v8::Local<v8::Object> location;
+ if (buildLocation(context, function->ScriptId(),
+ function->GetScriptLineNumber(),
+ function->GetScriptColumnNumber())
+ .ToLocal(&location)) {
createDataProperty(
context, properties, properties->Length(),
toV8StringInternalized(m_isolate, "[[FunctionLocation]]"));
@@ -732,26 +763,21 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
v8::True(m_isolate));
}
}
- if (!enabled()) return properties;
- if (value->IsMap() || value->IsWeakMap() || value->IsSet() ||
- value->IsWeakSet() || value->IsSetIterator() || value->IsMapIterator()) {
- v8::Local<v8::Value> entries =
- collectionEntries(context, v8::Local<v8::Object>::Cast(value));
- if (entries->IsArray()) {
- createDataProperty(context, properties, properties->Length(),
- toV8StringInternalized(m_isolate, "[[Entries]]"));
- createDataProperty(context, properties, properties->Length(), entries);
- }
+ v8::Local<v8::Array> entries;
+ if (collectionsEntries(context, value).ToLocal(&entries)) {
+ createDataProperty(context, properties, properties->Length(),
+ toV8StringInternalized(m_isolate, "[[Entries]]"));
+ createDataProperty(context, properties, properties->Length(), entries);
}
if (value->IsGeneratorObject()) {
- v8::Local<v8::Value> location =
- generatorObjectLocation(context, v8::Local<v8::Object>::Cast(value));
- if (location->IsObject()) {
+ v8::Local<v8::Object> location;
+ if (generatorObjectLocation(context, value).ToLocal(&location)) {
createDataProperty(
context, properties, properties->Length(),
toV8StringInternalized(m_isolate, "[[GeneratorLocation]]"));
createDataProperty(context, properties, properties->Length(), location);
}
+ if (!enabled()) return properties;
v8::Local<v8::Value> scopes;
if (generatorScopes(context, value).ToLocal(&scopes)) {
createDataProperty(context, properties, properties->Length(),
@@ -759,6 +785,7 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
createDataProperty(context, properties, properties->Length(), scopes);
}
}
+ if (!enabled()) return properties;
if (value->IsFunction()) {
v8::Local<v8::Function> function = value.As<v8::Function>();
v8::Local<v8::Value> boundFunction = function->GetBoundFunction();
@@ -773,99 +800,6 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
return properties;
}
-v8::Local<v8::Value> V8Debugger::collectionEntries(
- v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
- if (!enabled()) {
- UNREACHABLE();
- return v8::Undefined(m_isolate);
- }
- v8::Local<v8::Value> argv[] = {object};
- v8::Local<v8::Value> entriesValue;
- if (!callDebuggerMethod("getCollectionEntries", 1, argv, true)
- .ToLocal(&entriesValue) ||
- !entriesValue->IsArray())
- return v8::Undefined(m_isolate);
-
- v8::Local<v8::Array> entries = entriesValue.As<v8::Array>();
- v8::Local<v8::Array> copiedArray =
- v8::Array::New(m_isolate, entries->Length());
- if (!copiedArray->SetPrototype(context, v8::Null(m_isolate)).FromMaybe(false))
- return v8::Undefined(m_isolate);
- for (uint32_t i = 0; i < entries->Length(); ++i) {
- v8::Local<v8::Value> item;
- if (!entries->Get(debuggerContext(), i).ToLocal(&item))
- return v8::Undefined(m_isolate);
- v8::Local<v8::Value> copied;
- if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
- item)
- .ToLocal(&copied))
- return v8::Undefined(m_isolate);
- if (!createDataProperty(context, copiedArray, i, copied).FromMaybe(false))
- return v8::Undefined(m_isolate);
- }
- if (!markArrayEntriesAsInternal(context,
- v8::Local<v8::Array>::Cast(copiedArray),
- V8InternalValueType::kEntry))
- return v8::Undefined(m_isolate);
- return copiedArray;
-}
-
-v8::Local<v8::Value> V8Debugger::generatorObjectLocation(
- v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
- if (!enabled()) {
- UNREACHABLE();
- return v8::Null(m_isolate);
- }
- v8::Local<v8::Value> argv[] = {object};
- v8::Local<v8::Value> location;
- v8::Local<v8::Value> copied;
- if (!callDebuggerMethod("getGeneratorObjectLocation", 1, argv, true)
- .ToLocal(&location) ||
- !copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
- location)
- .ToLocal(&copied) ||
- !copied->IsObject())
- return v8::Null(m_isolate);
- if (!markAsInternal(context, v8::Local<v8::Object>::Cast(copied),
- V8InternalValueType::kLocation))
- return v8::Null(m_isolate);
- return copied;
-}
-
-v8::Local<v8::Value> V8Debugger::functionLocation(
- v8::Local<v8::Context> context, v8::Local<v8::Function> function) {
- int scriptId = function->ScriptId();
- if (scriptId == v8::UnboundScript::kNoScriptId) return v8::Null(m_isolate);
- int lineNumber = function->GetScriptLineNumber();
- int columnNumber = function->GetScriptColumnNumber();
- if (lineNumber == v8::Function::kLineOffsetNotFound ||
- columnNumber == v8::Function::kLineOffsetNotFound)
- return v8::Null(m_isolate);
- v8::Local<v8::Object> location = v8::Object::New(m_isolate);
- if (!location->SetPrototype(context, v8::Null(m_isolate)).FromMaybe(false))
- return v8::Null(m_isolate);
- if (!createDataProperty(
- context, location, toV8StringInternalized(m_isolate, "scriptId"),
- toV8String(m_isolate, String16::fromInteger(scriptId)))
- .FromMaybe(false))
- return v8::Null(m_isolate);
- if (!createDataProperty(context, location,
- toV8StringInternalized(m_isolate, "lineNumber"),
- v8::Integer::New(m_isolate, lineNumber))
- .FromMaybe(false))
- return v8::Null(m_isolate);
- if (!createDataProperty(context, location,
- toV8StringInternalized(m_isolate, "columnNumber"),
- v8::Integer::New(m_isolate, columnNumber))
- .FromMaybe(false))
- return v8::Null(m_isolate);
- if (!markAsInternal(context, location, V8InternalValueType::kLocation))
- return v8::Null(m_isolate);
- return location;
-}
-
-bool V8Debugger::isPaused() { return !m_pausedContext.IsEmpty(); }
-
std::unique_ptr<V8StackTraceImpl> V8Debugger::createStackTrace(
v8::Local<v8::StackTrace> stackTrace) {
int contextGroupId =
@@ -893,6 +827,34 @@ void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
if (!maxAsyncCallStackDepth) allAsyncTasksCanceled();
}
+void V8Debugger::registerAsyncTaskIfNeeded(void* task) {
+ if (m_taskToId.find(task) != m_taskToId.end()) return;
+
+ int id = ++m_lastTaskId;
+ m_taskToId[task] = id;
+ m_idToTask[id] = task;
+ if (static_cast<int>(m_idToTask.size()) > m_maxAsyncCallStacks) {
+ void* taskToRemove = m_idToTask.begin()->second;
+ asyncTaskCanceled(taskToRemove);
+ }
+}
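
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the bounded task registry that
// registerAsyncTaskIfNeeded implements. Ids increase monotonically, so the
// std::map's begin() entry is always the oldest task; once the map exceeds
// the cap, that oldest task is evicted (asyncTaskCanceled erases it from
// both maps).
#include <map>
#include <unordered_map>

class BoundedTaskRegistry {
 public:
  explicit BoundedTaskRegistry(int max_tasks) : max_tasks_(max_tasks) {}

  void Register(void* task) {
    if (task_to_id_.count(task)) return;      // already tracked
    int id = ++last_id_;
    task_to_id_[task] = id;
    id_to_task_[id] = task;
    if (static_cast<int>(id_to_task_.size()) > max_tasks_)
      Cancel(id_to_task_.begin()->second);    // evict the oldest task
  }

  void Cancel(void* task) {
    auto it = task_to_id_.find(task);
    if (it == task_to_id_.end()) return;
    id_to_task_.erase(it->second);
    task_to_id_.erase(it);
  }

 private:
  int max_tasks_;
  int last_id_ = 0;
  std::map<int, void*> id_to_task_;           // ordered: begin() is oldest
  std::unordered_map<void*, int> task_to_id_;
};
// ---------------------------------------------------------------------------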
+
+void V8Debugger::asyncTaskCreated(void* task, void* parentTask) {
+ if (!m_maxAsyncCallStackDepth) return;
+ if (parentTask) m_parentTask[task] = parentTask;
+ v8::HandleScope scope(m_isolate);
+ // We don't need to pass the context group id here because we get this
+ // callback from V8 for promise events only.
+ // Passing one as maxStackSize forces no async chain for the new stack and
+ // keeps it from growing exponentially.
+ std::unique_ptr<V8StackTraceImpl> creationStack =
+ V8StackTraceImpl::capture(this, 0, 1, String16());
+ if (creationStack && !creationStack->isEmpty()) {
+ m_asyncTaskCreationStacks[task] = std::move(creationStack);
+ registerAsyncTaskIfNeeded(task);
+ }
+}
+
void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task,
bool recurring) {
if (!m_maxAsyncCallStackDepth) return;
@@ -913,13 +875,7 @@ void V8Debugger::asyncTaskScheduled(const String16& taskName, void* task,
if (chain) {
m_asyncTaskStacks[task] = std::move(chain);
if (recurring) m_recurringTasks.insert(task);
- int id = ++m_lastTaskId;
- m_taskToId[task] = id;
- m_idToTask[id] = task;
- if (static_cast<int>(m_idToTask.size()) > m_maxAsyncCallStacks) {
- void* taskToRemove = m_idToTask.begin()->second;
- asyncTaskCanceled(taskToRemove);
- }
+ registerAsyncTaskIfNeeded(task);
}
}
@@ -927,6 +883,8 @@ void V8Debugger::asyncTaskCanceled(void* task) {
if (!m_maxAsyncCallStackDepth) return;
m_asyncTaskStacks.erase(task);
m_recurringTasks.erase(task);
+ m_parentTask.erase(task);
+ m_asyncTaskCreationStacks.erase(task);
auto it = m_taskToId.find(task);
if (it == m_taskToId.end()) return;
m_idToTask.erase(it->second);
@@ -936,7 +894,9 @@ void V8Debugger::asyncTaskCanceled(void* task) {
void V8Debugger::asyncTaskStarted(void* task) {
if (!m_maxAsyncCallStackDepth) return;
m_currentTasks.push_back(task);
- AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(task);
+ auto parentIt = m_parentTask.find(task);
+ AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(
+ parentIt == m_parentTask.end() ? task : parentIt->second);
  // Needs to support the following order of events:
// - asyncTaskScheduled
// <-- attached here -->
@@ -947,6 +907,10 @@ void V8Debugger::asyncTaskStarted(void* task) {
std::unique_ptr<V8StackTraceImpl> stack;
if (stackIt != m_asyncTaskStacks.end() && stackIt->second)
stack = stackIt->second->cloneImpl();
+ auto itCreation = m_asyncTaskCreationStacks.find(task);
+ if (stack && itCreation != m_asyncTaskCreationStacks.end()) {
+ stack->setCreation(itCreation->second->cloneImpl());
+ }
m_currentStacks.push_back(std::move(stack));
}
@@ -960,11 +924,7 @@ void V8Debugger::asyncTaskFinished(void* task) {
m_currentStacks.pop_back();
if (m_recurringTasks.find(task) == m_recurringTasks.end()) {
- m_asyncTaskStacks.erase(task);
- auto it = m_taskToId.find(task);
- if (it == m_taskToId.end()) return;
- m_idToTask.erase(it->second);
- m_taskToId.erase(it);
+ asyncTaskCanceled(task);
}
}
@@ -973,6 +933,8 @@ void V8Debugger::allAsyncTasksCanceled() {
m_recurringTasks.clear();
m_currentStacks.clear();
m_currentTasks.clear();
+ m_parentTask.clear();
+ m_asyncTaskCreationStacks.clear();
m_idToTask.clear();
m_taskToId.clear();
m_lastTaskId = 0;
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 68fba6eaa8..c45c76f7f9 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -26,7 +26,7 @@ class V8StackTraceImpl;
using protocol::Response;
-class V8Debugger {
+class V8Debugger : public v8::debug::DebugDelegate {
public:
V8Debugger(v8::Isolate*, V8InspectorImpl*);
~V8Debugger();
@@ -48,7 +48,6 @@ class V8Debugger {
void stepIntoStatement();
void stepOverStatement();
void stepOutOfFunction();
- void clearStepping();
Response setScriptSource(
const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
@@ -66,7 +65,7 @@ class V8Debugger {
void enable();
void disable();
- bool isPaused();
+ bool isPaused() const { return m_runningNestedMessageLoop; }
v8::Local<v8::Context> pausedContext() { return m_pausedContext; }
int maxAsyncCallChainDepth() { return m_maxAsyncCallStackDepth; }
@@ -104,6 +103,8 @@ class V8Debugger {
v8::Local<v8::Context> debuggerContext() const;
void clearBreakpoints();
+ static void v8OOMCallback(void* data);
+
static void breakProgramCallback(const v8::FunctionCallbackInfo<v8::Value>&);
void handleProgramBreak(v8::Local<v8::Context> pausedContext,
v8::Local<v8::Object> executionState,
@@ -111,19 +112,6 @@ class V8Debugger {
v8::Local<v8::Array> hitBreakpoints,
bool isPromiseRejection = false,
bool isUncaught = false);
- static void v8DebugEventCallback(const v8::debug::EventDetails&);
- v8::Local<v8::Value> callInternalGetterFunction(v8::Local<v8::Object>,
- const char* functionName);
- void handleV8DebugEvent(const v8::debug::EventDetails&);
- static void v8AsyncTaskListener(v8::debug::PromiseDebugActionType type,
- int id, void* data);
-
- v8::Local<v8::Value> collectionEntries(v8::Local<v8::Context>,
- v8::Local<v8::Object>);
- v8::Local<v8::Value> generatorObjectLocation(v8::Local<v8::Context>,
- v8::Local<v8::Object>);
- v8::Local<v8::Value> functionLocation(v8::Local<v8::Context>,
- v8::Local<v8::Function>);
enum ScopeTargetKind {
FUNCTION,
@@ -138,6 +126,25 @@ class V8Debugger {
v8::MaybeLocal<v8::Value> generatorScopes(v8::Local<v8::Context>,
v8::Local<v8::Value>);
+ void asyncTaskCreated(void* task, void* parentTask);
+ void registerAsyncTaskIfNeeded(void* task);
+
+ // v8::debug::DebugEventListener implementation.
+ void PromiseEventOccurred(v8::debug::PromiseDebugActionType type, int id,
+ int parentId) override;
+ void ScriptCompiled(v8::Local<v8::debug::Script> script,
+ bool has_compile_error) override;
+ void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+ v8::Local<v8::Object> exec_state,
+ v8::Local<v8::Value> break_points_hit) override;
+ void ExceptionThrown(v8::Local<v8::Context> paused_context,
+ v8::Local<v8::Object> exec_state,
+ v8::Local<v8::Value> exception,
+ v8::Local<v8::Value> promise, bool is_uncaught) override;
+ bool IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
+ const v8::debug::Location& start,
+ const v8::debug::Location& end) override;
+
v8::Isolate* m_isolate;
V8InspectorImpl* m_inspector;
int m_enableCount;
@@ -148,10 +155,12 @@ class V8Debugger {
v8::Local<v8::Context> m_pausedContext;
bool m_runningNestedMessageLoop;
int m_ignoreScriptParsedEventsCounter;
+ bool m_scheduledOOMBreak = false;
using AsyncTaskToStackTrace =
protocol::HashMap<void*, std::unique_ptr<V8StackTraceImpl>>;
AsyncTaskToStackTrace m_asyncTaskStacks;
+ AsyncTaskToStackTrace m_asyncTaskCreationStacks;
int m_maxAsyncCallStacks;
std::map<int, void*> m_idToTask;
std::unordered_map<void*, int> m_taskToId;
@@ -161,6 +170,7 @@ class V8Debugger {
std::vector<void*> m_currentTasks;
std::vector<std::unique_ptr<V8StackTraceImpl>> m_currentStacks;
protocol::HashMap<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
+ protocol::HashMap<void*, void*> m_parentTask;
v8::debug::ExceptionBreakState m_pauseOnExceptionsState;
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
index 3748ec9aa3..b3bd5efde8 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.cc
+++ b/deps/v8/src/inspector/v8-injected-script-host.cc
@@ -55,6 +55,9 @@ v8::Local<v8::Object> V8InjectedScriptHost::create(
USE(success);
v8::Local<v8::External> debuggerExternal =
v8::External::New(isolate, inspector);
+ setFunctionProperty(context, injectedScriptHost, "nullifyPrototype",
+ V8InjectedScriptHost::nullifyPrototypeCallback,
+ debuggerExternal);
setFunctionProperty(context, injectedScriptHost, "internalConstructorName",
V8InjectedScriptHost::internalConstructorNameCallback,
debuggerExternal);
@@ -77,6 +80,16 @@ v8::Local<v8::Object> V8InjectedScriptHost::create(
return injectedScriptHost;
}
+void V8InjectedScriptHost::nullifyPrototypeCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CHECK(info.Length() == 1 && info[0]->IsObject());
+ v8::Isolate* isolate = info.GetIsolate();
+ info[0]
+ .As<v8::Object>()
+ ->SetPrototype(isolate->GetCurrentContext(), v8::Null(isolate))
+ .ToChecked();
+}
+
void V8InjectedScriptHost::internalConstructorNameCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
if (info.Length() < 1 || !info[0]->IsObject()) return;
diff --git a/deps/v8/src/inspector/v8-injected-script-host.h b/deps/v8/src/inspector/v8-injected-script-host.h
index 7d293af5a7..a64c2f890c 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.h
+++ b/deps/v8/src/inspector/v8-injected-script-host.h
@@ -27,6 +27,8 @@ class V8InjectedScriptHost {
static v8::Local<v8::Object> create(v8::Local<v8::Context>, V8InspectorImpl*);
private:
+ static void nullifyPrototypeCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
static void internalConstructorNameCallback(
const v8::FunctionCallbackInfo<v8::Value>&);
static void formatAccessorsAsProperties(
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index f98747543b..9d6e62c908 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -36,7 +36,6 @@
#include "src/base/macros.h"
#include "src/inspector/protocol/Protocol.h"
-#include "include/v8-debug.h"
#include "include/v8-inspector.h"
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 3a5b59c28d..2674fc2f63 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -37,6 +37,11 @@ bool V8InspectorSession::canDispatchMethod(const StringView& method) {
protocol::Schema::Metainfo::commandPrefix);
}
+// static
+int V8ContextInfo::executionContextId(v8::Local<v8::Context> context) {
+ return InspectedContext::contextId(context);
+}
+
std::unique_ptr<V8InspectorSessionImpl> V8InspectorSessionImpl::create(
V8InspectorImpl* inspector, int contextGroupId,
V8Inspector::Channel* channel, const StringView& state) {
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 16c4777e84..c7d1cc2617 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -22,6 +22,7 @@ namespace ProfilerAgentState {
static const char samplingInterval[] = "samplingInterval";
static const char userInitiatedProfiling[] = "userInitiatedProfiling";
static const char profilerEnabled[] = "profilerEnabled";
+static const char preciseCoverageStarted[] = "preciseCoverageStarted";
}
namespace {
@@ -152,11 +153,8 @@ V8ProfilerAgentImpl::V8ProfilerAgentImpl(
protocol::DictionaryValue* state)
: m_session(session),
m_isolate(m_session->inspector()->isolate()),
- m_profiler(nullptr),
m_state(state),
- m_frontend(frontendChannel),
- m_enabled(false),
- m_recordingCPUProfile(false) {}
+ m_frontend(frontendChannel) {}
V8ProfilerAgentImpl::~V8ProfilerAgentImpl() {
if (m_profiler) m_profiler->Dispose();
@@ -204,8 +202,6 @@ void V8ProfilerAgentImpl::consoleProfileEnd(const String16& title) {
Response V8ProfilerAgentImpl::enable() {
if (m_enabled) return Response::OK();
m_enabled = true;
- DCHECK(!m_profiler);
- m_profiler = v8::CpuProfiler::New(m_isolate);
m_state->setBoolean(ProfilerAgentState::profilerEnabled, true);
return Response::OK();
}
@@ -216,18 +212,18 @@ Response V8ProfilerAgentImpl::disable() {
stopProfiling(m_startedProfiles[i - 1].m_id, false);
m_startedProfiles.clear();
stop(nullptr);
- m_profiler->Dispose();
- m_profiler = nullptr;
+ stopPreciseCoverage();
+ DCHECK(!m_profiler);
m_enabled = false;
m_state->setBoolean(ProfilerAgentState::profilerEnabled, false);
return Response::OK();
}
Response V8ProfilerAgentImpl::setSamplingInterval(int interval) {
- if (m_recordingCPUProfile)
+ if (m_profiler) {
return Response::Error("Cannot change sampling interval when profiling.");
+ }
m_state->setInteger(ProfilerAgentState::samplingInterval, interval);
- m_profiler->SetSamplingInterval(interval);
return Response::OK();
}
@@ -237,14 +233,14 @@ void V8ProfilerAgentImpl::restore() {
return;
m_enabled = true;
DCHECK(!m_profiler);
- m_profiler = v8::CpuProfiler::New(m_isolate);
- int interval = 0;
- m_state->getInteger(ProfilerAgentState::samplingInterval, &interval);
- if (interval) m_profiler->SetSamplingInterval(interval);
if (m_state->booleanProperty(ProfilerAgentState::userInitiatedProfiling,
false)) {
start();
}
+ if (m_state->booleanProperty(ProfilerAgentState::preciseCoverageStarted,
+ false)) {
+ startPreciseCoverage();
+ }
}
Response V8ProfilerAgentImpl::start() {
@@ -259,8 +255,9 @@ Response V8ProfilerAgentImpl::start() {
Response V8ProfilerAgentImpl::stop(
std::unique_ptr<protocol::Profiler::Profile>* profile) {
- if (!m_recordingCPUProfile)
+ if (!m_recordingCPUProfile) {
return Response::Error("No recording profiles found");
+ }
m_recordingCPUProfile = false;
std::unique_ptr<protocol::Profiler::Profile> cpuProfile =
stopProfiling(m_frontendInitiatedProfileId, !!profile);
@@ -273,6 +270,90 @@ Response V8ProfilerAgentImpl::stop(
return Response::OK();
}
+Response V8ProfilerAgentImpl::startPreciseCoverage() {
+ if (!m_enabled) return Response::Error("Profiler is not enabled");
+ m_state->setBoolean(ProfilerAgentState::preciseCoverageStarted, true);
+ v8::debug::Coverage::TogglePrecise(m_isolate, true);
+ return Response::OK();
+}
+
+Response V8ProfilerAgentImpl::stopPreciseCoverage() {
+ if (!m_enabled) return Response::Error("Profiler is not enabled");
+ m_state->setBoolean(ProfilerAgentState::preciseCoverageStarted, false);
+ v8::debug::Coverage::TogglePrecise(m_isolate, false);
+ return Response::OK();
+}
+
+namespace {
+Response takeCoverage(
+ v8::Isolate* isolate, bool reset_count,
+ std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
+ out_result) {
+ std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>> result =
+ protocol::Array<protocol::Profiler::ScriptCoverage>::create();
+ v8::HandleScope handle_scope(isolate);
+ v8::debug::Coverage coverage =
+ v8::debug::Coverage::Collect(isolate, reset_count);
+ for (size_t i = 0; i < coverage.ScriptCount(); i++) {
+ v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(i);
+ v8::Local<v8::debug::Script> script = script_data.GetScript();
+ std::unique_ptr<protocol::Array<protocol::Profiler::FunctionCoverage>>
+ functions =
+ protocol::Array<protocol::Profiler::FunctionCoverage>::create();
+ for (size_t j = 0; j < script_data.FunctionCount(); j++) {
+ v8::debug::Coverage::FunctionData function_data =
+ script_data.GetFunctionData(j);
+ std::unique_ptr<protocol::Array<protocol::Profiler::CoverageRange>>
+ ranges = protocol::Array<protocol::Profiler::CoverageRange>::create();
+ // At this point we only have per-function coverage data, so there is
+ // only one range per function.
+ ranges->addItem(
+ protocol::Profiler::CoverageRange::create()
+ .setStartLineNumber(function_data.Start().GetLineNumber())
+ .setStartColumnNumber(function_data.Start().GetColumnNumber())
+ .setEndLineNumber(function_data.End().GetLineNumber())
+ .setEndColumnNumber(function_data.End().GetColumnNumber())
+ .setCount(function_data.Count())
+ .build());
+ functions->addItem(
+ protocol::Profiler::FunctionCoverage::create()
+ .setFunctionName(toProtocolString(
+ function_data.Name().FromMaybe(v8::Local<v8::String>())))
+ .setRanges(std::move(ranges))
+ .build());
+ }
+ String16 url;
+ v8::Local<v8::String> name;
+ if (script->Name().ToLocal(&name) || script->SourceURL().ToLocal(&name)) {
+ url = toProtocolString(name);
+ }
+ result->addItem(protocol::Profiler::ScriptCoverage::create()
+ .setScriptId(String16::fromInteger(script->Id()))
+ .setUrl(url)
+ .setFunctions(std::move(functions))
+ .build());
+ }
+ *out_result = std::move(result);
+ return Response::OK();
+}
+} // anonymous namespace
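
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: a minimal textual dump built
// from the same v8::debug::Coverage accessors that takeCoverage() exercises
// above. Assumes the in-tree "src/debug/debug-interface.h" is available.
#include <cstdio>

void DumpBestEffortCoverage(v8::Isolate* isolate) {
  v8::HandleScope handle_scope(isolate);
  v8::debug::Coverage coverage =
      v8::debug::Coverage::Collect(isolate, /*reset_count=*/false);
  for (size_t i = 0; i < coverage.ScriptCount(); i++) {
    v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(i);
    for (size_t j = 0; j < script_data.FunctionCount(); j++) {
      v8::debug::Coverage::FunctionData function_data =
          script_data.GetFunctionData(j);
      // Per-function coverage only: one invocation count per function range.
      std::printf("script %d function %zu at %d:%d count=%u\n",
                  script_data.GetScript()->Id(), j,
                  function_data.Start().GetLineNumber(),
                  function_data.Start().GetColumnNumber(),
                  static_cast<unsigned>(function_data.Count()));
    }
  }
}
// ---------------------------------------------------------------------------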
+
+Response V8ProfilerAgentImpl::takePreciseCoverage(
+ std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
+ out_result) {
+ if (!m_state->booleanProperty(ProfilerAgentState::preciseCoverageStarted,
+ false)) {
+ return Response::Error("Precise coverage has not been started.");
+ }
+ return takeCoverage(m_isolate, true, out_result);
+}
+
+Response V8ProfilerAgentImpl::getBestEffortCoverage(
+ std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
+ out_result) {
+ return takeCoverage(m_isolate, false, out_result);
+}
+
String16 V8ProfilerAgentImpl::nextProfileId() {
return String16::fromInteger(
v8::base::NoBarrier_AtomicIncrement(&s_lastProfileId, 1));
@@ -280,6 +361,15 @@ String16 V8ProfilerAgentImpl::nextProfileId() {
void V8ProfilerAgentImpl::startProfiling(const String16& title) {
v8::HandleScope handleScope(m_isolate);
+ if (!m_startedProfilesCount) {
+ DCHECK(!m_profiler);
+ m_profiler = v8::CpuProfiler::New(m_isolate);
+ m_profiler->SetIdle(m_idle);
+ int interval =
+ m_state->integerProperty(ProfilerAgentState::samplingInterval, 0);
+ if (interval) m_profiler->SetSamplingInterval(interval);
+ }
+ ++m_startedProfilesCount;
m_profiler->StartProfiling(toV8String(m_isolate, title), true);
}
@@ -288,24 +378,28 @@ std::unique_ptr<protocol::Profiler::Profile> V8ProfilerAgentImpl::stopProfiling(
v8::HandleScope handleScope(m_isolate);
v8::CpuProfile* profile =
m_profiler->StopProfiling(toV8String(m_isolate, title));
- if (!profile) return nullptr;
std::unique_ptr<protocol::Profiler::Profile> result;
- if (serialize) result = createCPUProfile(m_isolate, profile);
- profile->Delete();
+ if (profile) {
+ if (serialize) result = createCPUProfile(m_isolate, profile);
+ profile->Delete();
+ }
+ --m_startedProfilesCount;
+ if (!m_startedProfilesCount) {
+ m_profiler->Dispose();
+ m_profiler = nullptr;
+ }
return result;
}
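
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the counted, lazy CpuProfiler
// lifetime that startProfiling/stopProfiling now implement. The profiler
// exists only while at least one profile is recording; settings that arrive
// while it is absent (idle flag, sampling interval) are replayed on creation.
// Assumes the public v8-profiler.h API used elsewhere in this file.
struct LazyCpuProfiler {
  v8::Isolate* isolate;
  v8::CpuProfiler* profiler = nullptr;
  int started_count = 0;
  bool idle = false;
  int sampling_interval = 0;  // 0 means "use the default"

  void Start(v8::Local<v8::String> title) {
    if (!started_count) {
      profiler = v8::CpuProfiler::New(isolate);
      profiler->SetIdle(idle);
      if (sampling_interval) profiler->SetSamplingInterval(sampling_interval);
    }
    ++started_count;
    profiler->StartProfiling(title, true);
  }

  void Stop(v8::Local<v8::String> title) {
    if (v8::CpuProfile* profile = profiler->StopProfiling(title))
      profile->Delete();
    if (!--started_count) {
      profiler->Dispose();
      profiler = nullptr;
    }
  }
};
// ---------------------------------------------------------------------------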
-bool V8ProfilerAgentImpl::isRecording() const {
- return m_recordingCPUProfile || !m_startedProfiles.empty();
-}
-
bool V8ProfilerAgentImpl::idleStarted() {
- if (m_profiler) m_profiler->SetIdle(true);
+ m_idle = true;
+ if (m_profiler) m_profiler->SetIdle(m_idle);
return m_profiler;
}
bool V8ProfilerAgentImpl::idleFinished() {
- if (m_profiler) m_profiler->SetIdle(false);
+ m_idle = false;
+ if (m_profiler) m_profiler->SetIdle(m_idle);
return m_profiler;
}
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index a8441174e0..c60ff862d1 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -37,6 +37,15 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
Response start() override;
Response stop(std::unique_ptr<protocol::Profiler::Profile>*) override;
+ Response startPreciseCoverage() override;
+ Response stopPreciseCoverage() override;
+ Response takePreciseCoverage(
+ std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
+ out_result) override;
+ Response getBestEffortCoverage(
+ std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
+ out_result) override;
+
void consoleProfile(const String16& title);
void consoleProfileEnd(const String16& title);
@@ -50,18 +59,18 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
std::unique_ptr<protocol::Profiler::Profile> stopProfiling(
const String16& title, bool serialize);
- bool isRecording() const;
-
V8InspectorSessionImpl* m_session;
v8::Isolate* m_isolate;
- v8::CpuProfiler* m_profiler;
+ v8::CpuProfiler* m_profiler = nullptr;
protocol::DictionaryValue* m_state;
protocol::Profiler::Frontend m_frontend;
- bool m_enabled;
- bool m_recordingCPUProfile;
+ bool m_enabled = false;
+ bool m_recordingCPUProfile = false;
class ProfileDescriptor;
std::vector<ProfileDescriptor> m_startedProfiles;
String16 m_frontendInitiatedProfileId;
+ bool m_idle = false;
+ int m_startedProfilesCount = 0;
DISALLOW_COPY_AND_ASSIGN(V8ProfilerAgentImpl);
};
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index b40f08ed06..17c8a7b1ac 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -30,6 +30,7 @@
#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 962a00a773..7d0edef13f 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -9,7 +9,6 @@
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
-#include "include/v8-debug.h"
#include "include/v8-version.h"
namespace v8_inspector {
@@ -141,10 +140,13 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
maxAsyncCallChainDepth = 1;
}
- // Only the top stack in the chain may be empty, so ensure that second stack
- // is non-empty (it's the top of appended chain).
- if (asyncCallChain && asyncCallChain->isEmpty())
+ // Only the top stack in the chain may be empty and lack a creation stack,
+ // so ensure that the second stack is non-empty (it's the top of the
+ // appended chain).
+ if (asyncCallChain && asyncCallChain->isEmpty() &&
+ !asyncCallChain->m_creation) {
asyncCallChain = asyncCallChain->m_parent.get();
+ }
if (stackTrace.IsEmpty() && !asyncCallChain) return nullptr;
@@ -180,9 +182,11 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::cloneImpl() {
std::vector<Frame> framesCopy(m_frames);
- return std::unique_ptr<V8StackTraceImpl>(
+ std::unique_ptr<V8StackTraceImpl> copy(
new V8StackTraceImpl(m_contextGroupId, m_description, framesCopy,
m_parent ? m_parent->cloneImpl() : nullptr));
+ if (m_creation) copy->setCreation(m_creation->cloneImpl());
+ return copy;
}
std::unique_ptr<V8StackTrace> V8StackTraceImpl::clone() {
@@ -205,6 +209,19 @@ V8StackTraceImpl::V8StackTraceImpl(int contextGroupId,
V8StackTraceImpl::~V8StackTraceImpl() {}
+void V8StackTraceImpl::setCreation(std::unique_ptr<V8StackTraceImpl> creation) {
+ m_creation = std::move(creation);
+ // When this async call chain is empty (it carries no useful schedule
+ // stack) and the parent chain has the same description but no creation
+ // stack of its own, we can merge the two together,
+ // e.g. for a Promise ThenableJob.
+ if (m_parent && isEmpty() && m_description == m_parent->m_description &&
+ !m_parent->m_creation) {
+ m_frames.swap(m_parent->m_frames);
+ m_parent = std::move(m_parent->m_parent);
+ }
+}
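
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the chain splice performed by
// setCreation above on the singly linked parent chain. The empty node adopts
// its parent's frames and unlinks the parent, so the chain shrinks by one.
#include <memory>
#include <utility>
#include <vector>

struct StackNode {
  std::vector<int> frames;                 // stand-in for stack frames
  std::unique_ptr<StackNode> parent;
};

void MergeWithParentIfEmpty(StackNode& node) {
  if (node.parent && node.frames.empty()) {
    node.frames.swap(node.parent->frames);
    // Safe: the grandparent pointer is released before the parent is freed.
    node.parent = std::move(node.parent->parent);
  }
}
// ---------------------------------------------------------------------------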
+
StringView V8StackTraceImpl::topSourceURL() const {
DCHECK(m_frames.size());
return toStringView(m_frames[0].m_scriptName);
@@ -243,6 +260,10 @@ V8StackTraceImpl::buildInspectorObjectImpl() const {
.build();
if (!m_description.isEmpty()) stackTrace->setDescription(m_description);
if (m_parent) stackTrace->setParent(m_parent->buildInspectorObjectImpl());
+ if (m_creation && m_creation->m_frames.size()) {
+ stackTrace->setPromiseCreationFrame(
+ m_creation->m_frames[0].buildInspectorObject());
+ }
return stackTrace;
}
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index f0a452e939..f8b53d0a65 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -81,6 +81,8 @@ class V8StackTraceImpl final : public V8StackTrace {
const override;
std::unique_ptr<StringBuffer> toString() const override;
+ void setCreation(std::unique_ptr<V8StackTraceImpl> creation);
+
private:
V8StackTraceImpl(int contextGroupId, const String16& description,
std::vector<Frame>& frames,
@@ -90,6 +92,7 @@ class V8StackTraceImpl final : public V8StackTrace {
String16 m_description;
std::vector<Frame> m_frames;
std::unique_ptr<V8StackTraceImpl> m_parent;
+ std::unique_ptr<V8StackTraceImpl> m_creation;
DISALLOW_COPY_AND_ASSIGN(V8StackTraceImpl);
};
diff --git a/deps/v8/src/inspector/wasm-translation.cc b/deps/v8/src/inspector/wasm-translation.cc
index 825341e122..00f1aabbf6 100644
--- a/deps/v8/src/inspector/wasm-translation.cc
+++ b/deps/v8/src/inspector/wasm-translation.cc
@@ -21,11 +21,11 @@ using namespace v8;
class WasmTranslation::TranslatorImpl {
public:
struct TransLocation {
- WasmTranslation *translation;
+ WasmTranslation* translation;
String16 script_id;
int line;
int column;
- TransLocation(WasmTranslation *translation, String16 script_id, int line,
+ TransLocation(WasmTranslation* translation, String16 script_id, int line,
int column)
: translation(translation),
script_id(script_id),
@@ -33,8 +33,10 @@ class WasmTranslation::TranslatorImpl {
column(column) {}
};
- virtual void Translate(TransLocation *loc) = 0;
- virtual void TranslateBack(TransLocation *loc) = 0;
+ virtual void Init(Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) = 0;
+ virtual void Translate(TransLocation*) = 0;
+ virtual void TranslateBack(TransLocation*) = 0;
+ virtual ~TranslatorImpl() {}
class RawTranslator;
class DisassemblingTranslator;
@@ -43,8 +45,9 @@ class WasmTranslation::TranslatorImpl {
class WasmTranslation::TranslatorImpl::RawTranslator
: public WasmTranslation::TranslatorImpl {
public:
- void Translate(TransLocation *loc) {}
- void TranslateBack(TransLocation *loc) {}
+ void Init(Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) {}
+ void Translate(TransLocation*) {}
+ void TranslateBack(TransLocation*) {}
};
class WasmTranslation::TranslatorImpl::DisassemblingTranslator
@@ -52,11 +55,13 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
using OffsetTable = debug::WasmDisassembly::OffsetTable;
public:
- DisassemblingTranslator(Isolate *isolate, Local<debug::WasmScript> script,
- WasmTranslation *translation,
- V8DebuggerAgentImpl *agent)
- : script_(isolate, script) {
+ DisassemblingTranslator(Isolate* isolate, Local<debug::WasmScript> script)
+ : script_(isolate, script) {}
+
+ void Init(Isolate* isolate, WasmTranslation* translation,
+ V8DebuggerAgentImpl* agent) override {
// Register fake scripts for each function in this wasm module/script.
+ Local<debug::WasmScript> script = script_.Get(isolate);
int num_functions = script->NumFunctions();
int num_imported_functions = script->NumImportedFunctions();
DCHECK_LE(0, num_imported_functions);
@@ -69,8 +74,8 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
}
}
- void Translate(TransLocation *loc) {
- const OffsetTable &offset_table = GetOffsetTable(loc);
+ void Translate(TransLocation* loc) override {
+ const OffsetTable& offset_table = GetOffsetTable(loc);
DCHECK(!offset_table.empty());
uint32_t byte_offset = static_cast<uint32_t>(loc->column);
@@ -96,18 +101,19 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
}
}
- void TranslateBack(TransLocation *loc) {
+ void TranslateBack(TransLocation* loc) override {
int func_index = GetFunctionIndexFromFakeScriptId(loc->script_id);
- const OffsetTable *reverse_table = GetReverseTable(func_index);
+ const OffsetTable* reverse_table = GetReverseTable(func_index);
if (!reverse_table) return;
DCHECK(!reverse_table->empty());
+ v8::Isolate* isolate = loc->translation->isolate_;
// Binary search for the given line and column.
unsigned left = 0; // inclusive
unsigned right = static_cast<unsigned>(reverse_table->size()); // exclusive
while (right - left > 1) {
unsigned mid = (left + right) / 2;
- auto &entry = (*reverse_table)[mid];
+ auto& entry = (*reverse_table)[mid];
if (entry.line < loc->line ||
(entry.line == loc->line && entry.column <= loc->column)) {
left = mid;
@@ -119,22 +125,31 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
int found_byte_offset = 0;
// If we found an exact match, use it. Otherwise check whether the next
// bigger entry is still in the same line. Report that one then.
+ // Otherwise we might have hit the special case of pointing after the last
+ // line, which is translated to the end of the function (one byte after the
+ // last function byte).
if ((*reverse_table)[left].line == loc->line &&
(*reverse_table)[left].column == loc->column) {
found_byte_offset = (*reverse_table)[left].byte_offset;
} else if (left + 1 < reverse_table->size() &&
(*reverse_table)[left + 1].line == loc->line) {
found_byte_offset = (*reverse_table)[left + 1].byte_offset;
+ } else if (left == reverse_table->size() - 1 &&
+ (*reverse_table)[left].line == loc->line - 1 &&
+ loc->column == 0) {
+ std::pair<int, int> func_range =
+ script_.Get(isolate)->GetFunctionRange(func_index);
+ DCHECK_LE(func_range.first, func_range.second);
+ found_byte_offset = func_range.second - func_range.first;
}
- v8::Isolate *isolate = loc->translation->isolate_;
loc->script_id = String16::fromInteger(script_.Get(isolate)->Id());
loc->line = func_index;
loc->column = found_byte_offset;
}
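
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the (line, column) predecessor
// search that TranslateBack performs above, over a table sorted by line and
// then column.
#include <vector>

struct OffsetEntry { int line; int column; int byte_offset; };

// Returns the byte offset of the last entry at or before (line, column),
// or -1 if every entry comes after the given position.
int FindByteOffset(const std::vector<OffsetEntry>& table, int line,
                   int column) {
  if (table.empty()) return -1;
  size_t left = 0;                                  // inclusive
  size_t right = table.size();                      // exclusive
  while (right - left > 1) {
    size_t mid = (left + right) / 2;
    const OffsetEntry& entry = table[mid];
    if (entry.line < line ||
        (entry.line == line && entry.column <= column)) {
      left = mid;
    } else {
      right = mid;
    }
  }
  const OffsetEntry& best = table[left];
  if (best.line < line || (best.line == line && best.column <= column))
    return best.byte_offset;
  return -1;                                        // position precedes table
}
// ---------------------------------------------------------------------------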
private:
- String16 GetFakeScriptUrl(v8::Isolate *isolate, int func_index) {
+ String16 GetFakeScriptUrl(v8::Isolate* isolate, int func_index) {
Local<debug::WasmScript> script = script_.Get(isolate);
String16 script_name = toProtocolString(script->Name().ToLocalChecked());
int numFunctions = script->NumFunctions();
@@ -157,13 +172,13 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
String16 GetFakeScriptId(const String16 script_id, int func_index) {
return String16::concat(script_id, '-', String16::fromInteger(func_index));
}
- String16 GetFakeScriptId(const TransLocation *loc) {
+ String16 GetFakeScriptId(const TransLocation* loc) {
return GetFakeScriptId(loc->script_id, loc->line);
}
- void AddFakeScript(v8::Isolate *isolate, const String16 &underlyingScriptId,
- int func_idx, WasmTranslation *translation,
- V8DebuggerAgentImpl *agent) {
+ void AddFakeScript(v8::Isolate* isolate, const String16& underlyingScriptId,
+ int func_idx, WasmTranslation* translation,
+ V8DebuggerAgentImpl* agent) {
String16 fake_script_id = GetFakeScriptId(underlyingScriptId, func_idx);
String16 fake_script_url = GetFakeScriptUrl(isolate, func_idx);
@@ -177,14 +192,15 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
String16 source(disassembly.disassembly.data(),
disassembly.disassembly.length());
std::unique_ptr<V8DebuggerScript> fake_script =
- V8DebuggerScript::CreateWasm(isolate, script, fake_script_id,
- std::move(fake_script_url), source);
+ V8DebuggerScript::CreateWasm(isolate, translation, script,
+ fake_script_id, std::move(fake_script_url),
+ source);
translation->AddFakeScript(fake_script->scriptId(), this);
agent->didParseSource(std::move(fake_script), true);
}
- int GetFunctionIndexFromFakeScriptId(const String16 &fake_script_id) {
+ int GetFunctionIndexFromFakeScriptId(const String16& fake_script_id) {
size_t last_dash_pos = fake_script_id.reverseFind('-');
DCHECK_GT(fake_script_id.length(), last_dash_pos);
bool ok = true;
@@ -193,7 +209,7 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
return func_index;
}
- const OffsetTable &GetOffsetTable(const TransLocation *loc) {
+ const OffsetTable& GetOffsetTable(const TransLocation* loc) {
int func_index = loc->line;
auto it = offset_tables_.find(func_index);
// TODO(clemensh): Once we load disassembly lazily, the offset table
@@ -202,7 +218,7 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
return it->second;
}
- const OffsetTable *GetReverseTable(int func_index) {
+ const OffsetTable* GetReverseTable(int func_index) {
auto it = reverse_tables_.find(func_index);
if (it != reverse_tables_.end()) return &it->second;
@@ -233,27 +249,29 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
std::unordered_map<int, const OffsetTable> reverse_tables_;
};
-WasmTranslation::WasmTranslation(v8::Isolate *isolate)
+WasmTranslation::WasmTranslation(v8::Isolate* isolate)
: isolate_(isolate), mode_(Disassemble) {}
WasmTranslation::~WasmTranslation() { Clear(); }
void WasmTranslation::AddScript(Local<debug::WasmScript> script,
- V8DebuggerAgentImpl *agent) {
- int script_id = script->Id();
- DCHECK_EQ(0, wasm_translators_.count(script_id));
+ V8DebuggerAgentImpl* agent) {
std::unique_ptr<TranslatorImpl> impl;
switch (mode_) {
case Raw:
impl.reset(new TranslatorImpl::RawTranslator());
break;
case Disassemble:
- impl.reset(new TranslatorImpl::DisassemblingTranslator(isolate_, script,
- this, agent));
+ impl.reset(new TranslatorImpl::DisassemblingTranslator(isolate_, script));
break;
}
DCHECK(impl);
- wasm_translators_.insert(std::make_pair(script_id, std::move(impl)));
+ auto inserted =
+ wasm_translators_.insert(std::make_pair(script->Id(), std::move(impl)));
+ // Check that no mapping for this script id existed before.
+ DCHECK(inserted.second);
+ // impl has been moved; use the returned iterator to call Init.
+ inserted.first->second->Init(isolate_, this, agent);
}
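
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the insert-then-Init pattern
// adopted above. Because the unique_ptr is moved into the map, any later
// initialization must go through the iterator returned by insert(), never
// through the moved-from local.
#include <map>
#include <memory>
#include <utility>

struct Translator {
  virtual ~Translator() {}
  virtual void Init() = 0;
};

void AddTranslator(std::map<int, std::unique_ptr<Translator>>& translators,
                   int script_id, std::unique_ptr<Translator> impl) {
  auto inserted =
      translators.insert(std::make_pair(script_id, std::move(impl)));
  // inserted.second is false if a mapping already existed; the new impl was
  // discarded in that case, so only initialize on a fresh insertion.
  if (inserted.second) inserted.first->second->Init();
}
// ---------------------------------------------------------------------------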
void WasmTranslation::Clear() {
@@ -263,7 +281,7 @@ void WasmTranslation::Clear() {
// Translation "forward" (to artificial scripts).
bool WasmTranslation::TranslateWasmScriptLocationToProtocolLocation(
- String16 *script_id, int *line_number, int *column_number) {
+ String16* script_id, int* line_number, int* column_number) {
DCHECK(script_id && line_number && column_number);
bool ok = true;
int script_id_int = script_id->toInteger(&ok);
@@ -271,7 +289,7 @@ bool WasmTranslation::TranslateWasmScriptLocationToProtocolLocation(
auto it = wasm_translators_.find(script_id_int);
if (it == wasm_translators_.end()) return false;
- TranslatorImpl *translator = it->second.get();
+ TranslatorImpl* translator = it->second.get();
TranslatorImpl::TransLocation trans_loc(this, std::move(*script_id),
*line_number, *column_number);
@@ -286,10 +304,10 @@ bool WasmTranslation::TranslateWasmScriptLocationToProtocolLocation(
// Translation "backward" (from artificial to real scripts).
bool WasmTranslation::TranslateProtocolLocationToWasmScriptLocation(
- String16 *script_id, int *line_number, int *column_number) {
+ String16* script_id, int* line_number, int* column_number) {
auto it = fake_scripts_.find(*script_id);
if (it == fake_scripts_.end()) return false;
- TranslatorImpl *translator = it->second;
+ TranslatorImpl* translator = it->second;
TranslatorImpl::TransLocation trans_loc(this, std::move(*script_id),
*line_number, *column_number);
@@ -302,8 +320,8 @@ bool WasmTranslation::TranslateProtocolLocationToWasmScriptLocation(
return true;
}
-void WasmTranslation::AddFakeScript(const String16 &scriptId,
- TranslatorImpl *translator) {
+void WasmTranslation::AddFakeScript(const String16& scriptId,
+ TranslatorImpl* translator) {
DCHECK_EQ(0, fake_scripts_.count(scriptId));
fake_scripts_.insert(std::make_pair(scriptId, translator));
}
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 26b6422f6b..d77b137615 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -88,6 +88,16 @@ const Register FastNewObjectDescriptor::NewTargetRegister() {
return kJavaScriptCallNewTargetRegister;
}
+void FastNewArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {TargetRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+const Register FastNewArgumentsDescriptor::TargetRegister() {
+ return kJSFunctionRegister;
+}
+
void LoadDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kName, kSlot
@@ -433,6 +443,15 @@ void CallTrampolineDescriptor::InitializePlatformIndependent(
machine_types);
}
+void CallForwardVarargsDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kStartIndex
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
void ConstructStubDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kFunction, kNewTarget, kActualArgumentsCount, kAllocationSite
@@ -452,21 +471,21 @@ void ConstructTrampolineDescriptor::InitializePlatformIndependent(
machine_types);
}
-void CallFunctionWithFeedbackDescriptor::InitializePlatformIndependent(
+void CallICDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- // kFunction, kSlot
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::TaggedSigned()};
+ // kTarget, kActualArgumentsCount, kSlot, kVector
+ MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::Int32(),
+ MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformIndependent(
+void CallICTrampolineDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- // kFunction, kActualArgumentsCount, kSlot, kVector
- MachineType machine_types[] = {
- MachineType::TaggedPointer(), MachineType::Int32(),
- MachineType::TaggedSigned(), MachineType::AnyTagged()};
+ // kTarget, kActualArgumentsCount, kSlot
+ MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::Int32()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
@@ -498,6 +517,16 @@ const Register BuiltinDescriptor::TargetRegister() {
return kJSFunctionRegister;
}
+void ArrayConstructorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
void ArrayNoArgumentConstructorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter
@@ -522,9 +551,8 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformIndependent(
void ArrayNArgumentsConstructorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kFunction, kAllocationSite, kActualArgumentsCount
- MachineType machine_types[] = {MachineType::TaggedPointer(),
- MachineType::AnyTagged(),
- MachineType::Int32()};
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
@@ -554,7 +582,7 @@ void InterpreterDispatchDescriptor::InitializePlatformIndependent(
// kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable
MachineType machine_types[] = {
MachineType::AnyTagged(), MachineType::IntPtr(), MachineType::AnyTagged(),
- MachineType::AnyTagged()};
+ MachineType::IntPtr()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
@@ -598,5 +626,13 @@ void InterpreterCEntryDescriptor::InitializePlatformIndependent(
machine_types);
}
+void FrameDropperTrampolineDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // New FP value.
+ MachineType machine_types[] = {MachineType::Pointer()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 1d1b48af93..4cf5c29d6c 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -33,9 +33,7 @@ class PlatformInterfaceDescriptor;
V(FastNewClosure) \
V(FastNewFunctionContext) \
V(FastNewObject) \
- V(FastNewRestParameter) \
- V(FastNewSloppyArguments) \
- V(FastNewStrictArguments) \
+ V(FastNewArguments) \
V(TypeConversion) \
V(Typeof) \
V(FastCloneRegExp) \
@@ -44,27 +42,22 @@ class PlatformInterfaceDescriptor;
V(CreateAllocationSite) \
V(CreateWeakCell) \
V(CallFunction) \
- V(CallFunctionWithFeedback) \
- V(CallFunctionWithFeedbackAndVector) \
+ V(CallIC) \
+ V(CallICTrampoline) \
+ V(CallForwardVarargs) \
V(CallConstruct) \
V(CallTrampoline) \
V(ConstructStub) \
V(ConstructTrampoline) \
V(RegExpExec) \
+ V(RegExpReplace) \
+ V(RegExpSplit) \
V(CopyFastSmiOrObjectElements) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
- V(AllocateFloat32x4) \
- V(AllocateInt32x4) \
- V(AllocateUint32x4) \
- V(AllocateBool32x4) \
- V(AllocateInt16x8) \
- V(AllocateUint16x8) \
- V(AllocateBool16x8) \
- V(AllocateInt8x16) \
- V(AllocateUint8x16) \
- V(AllocateBool8x16) \
V(Builtin) \
+ V(ArrayConstructor) \
+ V(ForEach) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
@@ -77,12 +70,15 @@ class PlatformInterfaceDescriptor;
V(StringCharAt) \
V(StringCharCodeAt) \
V(StringCompare) \
+ V(StringIndexOf) \
V(SubString) \
V(Keyed) \
V(Named) \
V(CreateIterResultObject) \
V(HasProperty) \
V(ForInFilter) \
+ V(ForInNext) \
+ V(ForInPrepare) \
V(GetProperty) \
V(CallHandler) \
V(ArgumentAdaptor) \
@@ -98,7 +94,9 @@ class PlatformInterfaceDescriptor;
V(InterpreterPushArgsAndConstructArray) \
V(InterpreterCEntry) \
V(ResumeGenerator) \
- V(PromiseHandleReject)
+ V(FrameDropperTrampoline) \
+ V(PromiseHandleReject) \
+ V(WasmRuntimeCall)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
public:
@@ -288,6 +286,41 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
kContext = kParameterCount /* implicit parameter */ \
};
+#define DECLARE_BUILTIN_DESCRIPTOR(name) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, BuiltinDescriptor) \
+ protected: \
+ void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
+ override { \
+ MachineType machine_types[] = {MachineType::AnyTagged(), \
+ MachineType::AnyTagged(), \
+ MachineType::Int32()}; \
+ int argc = kStackParameterCount + 1 - arraysize(machine_types); \
+ data->InitializePlatformIndependent(arraysize(machine_types), argc, \
+ machine_types); \
+ } \
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
+ override { \
+ Register registers[] = {TargetRegister(), NewTargetRegister(), \
+ ArgumentsCountRegister()}; \
+ data->InitializePlatformSpecific(arraysize(registers), registers); \
+ } \
+ \
+ public:
+
+#define DEFINE_BUILTIN_PARAMETERS(...) \
+ enum ParameterIndices { \
+ kReceiver, \
+ kBeforeFirstStackParameter = kReceiver, \
+ __VA_ARGS__, \
+ kAfterLastStackParameter, \
+ kNewTarget = kAfterLastStackParameter, \
+ kArgumentsCount, \
+ kContext, /* implicit parameter */ \
+ kParameterCount = kContext, \
+ kStackParameterCount = \
+ kAfterLastStackParameter - kBeforeFirstStackParameter - 1, \
+ };
+
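// A sketch of the expansion, using the ForEachDescriptor declared further
// down in this header:
//   DEFINE_BUILTIN_PARAMETERS(kCallback, kThisArg)
// yields kReceiver = 0, kCallback = 1, kThisArg = 2,
// kAfterLastStackParameter = kNewTarget = 3, kArgumentsCount = 4,
// kContext = kParameterCount = 5, and
// kStackParameterCount = 3 - 0 - 1 = 2 (kCallback and kThisArg).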
class VoidDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
@@ -455,21 +488,11 @@ class FastNewObjectDescriptor : public CallInterfaceDescriptor {
static const Register NewTargetRegister();
};
-class FastNewRestParameterDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(FastNewRestParameterDescriptor, CallInterfaceDescriptor)
-};
-
-class FastNewSloppyArgumentsDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(FastNewSloppyArgumentsDescriptor,
- CallInterfaceDescriptor)
-};
-
-class FastNewStrictArgumentsDescriptor : public CallInterfaceDescriptor {
+class FastNewArgumentsDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(FastNewStrictArgumentsDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kFunction)
+ DECLARE_DESCRIPTOR(FastNewArgumentsDescriptor, CallInterfaceDescriptor)
+ static const Register TargetRegister();
};
class TypeConversionDescriptor final : public CallInterfaceDescriptor {
@@ -501,6 +524,20 @@ class ForInFilterDescriptor final : public CallInterfaceDescriptor {
kParameterCount)
};
+class ForInNextDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kObject, kCacheArray, kCacheType, kIndex)
+ DECLARE_DEFAULT_DESCRIPTOR(ForInNextDescriptor, CallInterfaceDescriptor,
+ kParameterCount)
+};
+
+class ForInPrepareDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kObject)
+ DECLARE_DEFAULT_DESCRIPTOR(ForInPrepareDescriptor, CallInterfaceDescriptor,
+ kParameterCount)
+};
+
class GetPropertyDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kKey)
@@ -560,6 +597,12 @@ class CallTrampolineDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
+class CallForwardVarargsDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kStartIndex)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallForwardVarargsDescriptor,
+ CallInterfaceDescriptor)
+};
class ConstructStubDescriptor : public CallInterfaceDescriptor {
public:
@@ -583,24 +626,20 @@ class CallFunctionDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
};
-
-class CallFunctionWithFeedbackDescriptor : public CallInterfaceDescriptor {
+class CallICDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kFunction, kSlot)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- CallFunctionWithFeedbackDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kSlot, kVector)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallICDescriptor,
+ CallInterfaceDescriptor)
};
-
-class CallFunctionWithFeedbackAndVectorDescriptor
- : public CallInterfaceDescriptor {
+class CallICTrampolineDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kFunction, kActualArgumentsCount, kSlot, kVector)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- CallFunctionWithFeedbackAndVectorDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kSlot)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallICTrampolineDescriptor,
+ CallInterfaceDescriptor)
};
-
class CallConstructDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CallConstructDescriptor, CallInterfaceDescriptor)
@@ -613,6 +652,20 @@ class RegExpExecDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
+class RegExpReplaceDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kString, kReplaceValue)
+ DECLARE_DEFAULT_DESCRIPTOR(RegExpReplaceDescriptor, CallInterfaceDescriptor,
+ kParameterCount)
+};
+
+class RegExpSplitDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kString, kLimit)
+ DECLARE_DEFAULT_DESCRIPTOR(RegExpSplitDescriptor, CallInterfaceDescriptor,
+ kParameterCount)
+};
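// As with every DEFINE_PARAMETERS use, the macro just enumerates indices;
// for RegExpSplitDescriptor above that is kReceiver = 0, kString = 1,
// kLimit = 2 and kParameterCount = 3, with kContext aliased to
// kParameterCount as the implicit context parameter.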
+
class CopyFastSmiOrObjectElementsDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject)
@@ -632,14 +685,6 @@ class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(AllocateHeapNumberDescriptor, CallInterfaceDescriptor)
};
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
- class Allocate##Type##Descriptor : public CallInterfaceDescriptor { \
- public: \
- DECLARE_DESCRIPTOR(Allocate##Type##Descriptor, CallInterfaceDescriptor) \
- };
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
-
class BuiltinDescriptor : public CallInterfaceDescriptor {
public:
// TODO(ishell): Where is kFunction??
@@ -651,6 +696,19 @@ class BuiltinDescriptor : public CallInterfaceDescriptor {
static const Register TargetRegister();
};
+class ForEachDescriptor : public BuiltinDescriptor {
+ public:
+ DEFINE_BUILTIN_PARAMETERS(kCallback, kThisArg)
+ DECLARE_BUILTIN_DESCRIPTOR(ForEachDescriptor)
+};
+
+class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArrayConstructorDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ArrayNoArgumentConstructorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
@@ -745,6 +803,13 @@ class SubStringDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
+class StringIndexOfDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kSearchString, kPosition)
+ DECLARE_DEFAULT_DESCRIPTOR(StringIndexOfDescriptor, CallInterfaceDescriptor,
+ kParameterCount)
+};
+
// TODO(ishell): not used, remove.
class KeyedDescriptor : public CallInterfaceDescriptor {
public:
@@ -876,6 +941,11 @@ class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor, CallInterfaceDescriptor)
};
+class FrameDropperTrampolineDescriptor final : public CallInterfaceDescriptor {
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FrameDropperTrampolineDescriptor,
+ CallInterfaceDescriptor)
+};
+
class PromiseHandleRejectDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kPromise, kOnReject, kException)
@@ -883,6 +953,12 @@ class PromiseHandleRejectDescriptor final : public CallInterfaceDescriptor {
CallInterfaceDescriptor, kParameterCount)
};
+class WasmRuntimeCallDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DEFAULT_DESCRIPTOR(WasmRuntimeCallDescriptor, CallInterfaceDescriptor,
+ 0)
+};
+
#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
#undef DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index 8e6a732861..cc6777588a 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -178,7 +178,10 @@ Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
int BytecodeArrayAccessor::GetJumpTargetOffset() const {
Bytecode bytecode = current_bytecode();
if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
- int relative_offset = GetImmediateOperand(0);
+ int relative_offset = GetUnsignedImmediateOperand(0);
+ if (bytecode == Bytecode::kJumpLoop) {
+ relative_offset = -relative_offset;
+ }
return current_offset() + relative_offset + current_prefix_offset();
} else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
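// Worked example of the new arithmetic (offsets illustrative): a kJumpLoop
// at current_offset() == 20 with unsigned operand 12 and no scaling prefix
// negates the operand and targets 20 + (-12) + 0 == 8, so loop jumps stay
// backwards while the stored operand itself is now unsigned.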
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 58d7d6df41..c327fb7cd8 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -11,6 +11,7 @@
#include "src/interpreter/bytecode-peephole-optimizer.h"
#include "src/interpreter/bytecode-register-optimizer.h"
#include "src/interpreter/interpreter-intrinsics.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -21,8 +22,9 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
int locals_count, FunctionLiteral* literal,
SourcePositionTableBuilder::RecordingMode source_position_mode)
: zone_(zone),
+ literal_(literal),
bytecode_generated_(false),
- constant_array_builder_(zone, isolate->factory()->the_hole_value()),
+ constant_array_builder_(zone),
handler_table_builder_(zone),
return_seen_in_block_(false),
parameter_count_(parameter_count),
@@ -69,6 +71,12 @@ Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
return Register::FromParameterIndex(parameter_index, parameter_count());
}
+Register BytecodeArrayBuilder::Local(int index) const {
+ // TODO(marja): Make a DCHECK once crbug.com/706234 is fixed.
+ CHECK_LT(index, locals_count());
+ return Register(index);
+}
+
Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
DCHECK(return_seen_in_block_);
DCHECK(!bytecode_generated_);
@@ -382,12 +390,54 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
- size_t entry = GetConstantPoolEntry(object);
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
+ const AstRawString* raw_string) {
+ size_t entry = GetConstantPoolEntry(raw_string);
+ OutputLdaConstant(entry);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(const Scope* scope) {
+ size_t entry = GetConstantPoolEntry(scope);
OutputLdaConstant(entry);
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
+ const AstValue* ast_value) {
+ if (ast_value->IsSmi()) {
+ return LoadLiteral(ast_value->AsSmi());
+ } else if (ast_value->IsUndefined()) {
+ return LoadUndefined();
+ } else if (ast_value->IsTrue()) {
+ return LoadTrue();
+ } else if (ast_value->IsFalse()) {
+ return LoadFalse();
+ } else if (ast_value->IsNull()) {
+ return LoadNull();
+ } else if (ast_value->IsTheHole()) {
+ return LoadTheHole();
+ } else if (ast_value->IsString()) {
+ return LoadLiteral(ast_value->AsString());
+ } else if (ast_value->IsHeapNumber()) {
+ size_t entry = GetConstantPoolEntry(ast_value);
+ OutputLdaConstant(entry);
+ return *this;
+ } else {
+ // This should be the only ast value type left.
+ DCHECK(ast_value->IsSymbol());
+ size_t entry;
+ switch (ast_value->AsSymbol()) {
+ case AstSymbol::kHomeObjectSymbol:
+ entry = HomeObjectSymbolConstantPoolEntry();
+ break;
+ // No default case so that we get a warning if AstSymbol changes
+ }
+ OutputLdaConstant(entry);
+ return *this;
+ }
+}
+
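// In effect this overload is a single front end over the specialized loads:
// Smis and oddballs lower to dedicated bytecodes, strings and heap numbers
// go through the constant pool, and known symbols reuse the singleton pool
// entries.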
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
OutputLdaUndefined();
return *this;
@@ -444,9 +494,18 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
- const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(const AstRawString* name,
+ int feedback_slot,
+ TypeofMode typeof_mode) {
size_t name_index = GetConstantPoolEntry(name);
+ // Ensure that typeof mode is in sync with the IC slot kind if the function
+ // literal is available (not a unit test case).
+ // TODO(ishell): check only in debug mode.
+ if (literal_) {
+ FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
+ CHECK_EQ(GetTypeofModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+ typeof_mode);
+ }
if (typeof_mode == INSIDE_TYPEOF) {
OutputLdaGlobalInsideTypeof(name_index, feedback_slot);
} else {
@@ -457,7 +516,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
- const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
+ const AstRawString* name, int feedback_slot, LanguageMode language_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (language_mode == SLOPPY) {
OutputStaGlobalSloppy(name_index, feedback_slot);
@@ -468,12 +527,20 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
- int slot_index,
- int depth) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(
+ Register context, int slot_index, int depth,
+ ContextSlotMutability mutability) {
if (context.is_current_context() && depth == 0) {
- OutputLdaCurrentContextSlot(slot_index);
+ if (mutability == kImmutableSlot) {
+ OutputLdaImmutableCurrentContextSlot(slot_index);
+ } else {
+ DCHECK_EQ(kMutableSlot, mutability);
+ OutputLdaCurrentContextSlot(slot_index);
+ }
+ } else if (mutability == kImmutableSlot) {
+ OutputLdaImmutableContextSlot(context, slot_index, depth);
} else {
+ DCHECK_EQ(mutability, kMutableSlot);
OutputLdaContextSlot(context, slot_index, depth);
}
return *this;
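// The dispatch now covers four cases (a summary of the code above):
//   current context, depth 0, immutable -> LdaImmutableCurrentContextSlot
//   current context, depth 0, mutable   -> LdaCurrentContextSlot
//   any other context,        immutable -> LdaImmutableContextSlot
//   any other context,        mutable   -> LdaContextSlot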
@@ -491,7 +558,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
- const Handle<String> name, TypeofMode typeof_mode) {
+ const AstRawString* name, TypeofMode typeof_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (typeof_mode == INSIDE_TYPEOF) {
OutputLdaLookupSlotInsideTypeof(name_index);
@@ -503,7 +570,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupContextSlot(
- const Handle<String> name, TypeofMode typeof_mode, int slot_index,
+ const AstRawString* name, TypeofMode typeof_mode, int slot_index,
int depth) {
size_t name_index = GetConstantPoolEntry(name);
if (typeof_mode == INSIDE_TYPEOF) {
@@ -516,7 +583,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupContextSlot(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupGlobalSlot(
- const Handle<String> name, TypeofMode typeof_mode, int feedback_slot,
+ const AstRawString* name, TypeofMode typeof_mode, int feedback_slot,
int depth) {
size_t name_index = GetConstantPoolEntry(name);
if (typeof_mode == INSIDE_TYPEOF) {
@@ -529,7 +596,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupGlobalSlot(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
- const Handle<String> name, LanguageMode language_mode) {
+ const AstRawString* name, LanguageMode language_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (language_mode == SLOPPY) {
OutputStaLookupSlotSloppy(name_index);
@@ -541,7 +608,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
- Register object, const Handle<Name> name, int feedback_slot) {
+ Register object, const AstRawString* name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
OutputLdaNamedProperty(object, name_index, feedback_slot);
return *this;
@@ -553,6 +620,20 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadIteratorProperty(
+ Register object, int feedback_slot) {
+ size_t name_index = IteratorSymbolConstantPoolEntry();
+ OutputLdaNamedProperty(object, name_index, feedback_slot);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAsyncIteratorProperty(
+ Register object, int feedback_slot) {
+ size_t name_index = AsyncIteratorSymbolConstantPoolEntry();
+ OutputLdaNamedProperty(object, name_index, feedback_slot);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreDataPropertyInLiteral(
Register object, Register name, DataPropertyInLiteralFlags flags,
int feedback_slot) {
@@ -561,9 +642,16 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreDataPropertyInLiteral(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
- Register object, const Handle<Name> name, int feedback_slot,
+ Register object, size_t name_index, int feedback_slot,
LanguageMode language_mode) {
- size_t name_index = GetConstantPoolEntry(name);
+ // Ensure that language mode is in sync with the IC slot kind if the function
+ // literal is available (not a unit test case).
+ // TODO(ishell): check only in debug mode.
+ if (literal_) {
+ FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
+ CHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+ language_mode);
+ }
if (language_mode == SLOPPY) {
OutputStaNamedPropertySloppy(object, name_index, feedback_slot);
} else {
@@ -573,9 +661,39 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
+ Register object, const AstRawString* name, int feedback_slot,
+ LanguageMode language_mode) {
+ size_t name_index = GetConstantPoolEntry(name);
+ return StoreNamedProperty(object, name_index, feedback_slot, language_mode);
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedOwnProperty(
+ Register object, const AstRawString* name, int feedback_slot) {
+ size_t name_index = GetConstantPoolEntry(name);
+ // Ensure that the store operation is in sync with the IC slot kind if
+ // the function literal is available (not a unit test case).
+ // TODO(ishell): check only in debug mode.
+ if (literal_) {
+ FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
+ CHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+ feedback_vector_spec()->GetKind(slot));
+ }
+ OutputStaNamedOwnProperty(object, name_index, feedback_slot);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
+ // Ensure that language mode is in sync with the IC slot kind if the function
+ // literal is available (not a unit test case).
+ // TODO(ishell): check only in debug mode.
+ if (literal_) {
+ FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
+ CHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+ language_mode);
+ }
if (language_mode == SLOPPY) {
OutputStaKeyedPropertySloppy(object, key, feedback_slot);
} else {
@@ -585,6 +703,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreHomeObjectProperty(
+ Register object, int feedback_slot, LanguageMode language_mode) {
+ size_t name_index = HomeObjectSymbolConstantPoolEntry();
+ return StoreNamedProperty(object, name_index, feedback_slot, language_mode);
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
size_t shared_function_info_entry, int slot, int flags) {
OutputCreateClosure(shared_function_info_entry, slot, flags);
@@ -592,17 +716,17 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateBlockContext(
- Handle<ScopeInfo> scope_info) {
- size_t entry = GetConstantPoolEntry(scope_info);
+ const Scope* scope) {
+ size_t entry = GetConstantPoolEntry(scope);
OutputCreateBlockContext(entry);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateCatchContext(
- Register exception, Handle<String> name, Handle<ScopeInfo> scope_info) {
+ Register exception, const AstRawString* name, const Scope* scope) {
size_t name_index = GetConstantPoolEntry(name);
- size_t scope_info_index = GetConstantPoolEntry(scope_info);
- OutputCreateCatchContext(exception, name_index, scope_info_index);
+ size_t scope_index = GetConstantPoolEntry(scope);
+ OutputCreateCatchContext(exception, name_index, scope_index);
return *this;
}
@@ -617,9 +741,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateEvalContext(int slots) {
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(
- Register object, Handle<ScopeInfo> scope_info) {
- size_t scope_info_index = GetConstantPoolEntry(scope_info);
- OutputCreateWithContext(object, scope_info_index);
+ Register object, const Scope* scope) {
+ size_t scope_index = GetConstantPoolEntry(scope);
+ OutputCreateWithContext(object, scope_index);
return *this;
}
@@ -642,7 +766,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
- Handle<String> pattern, int literal_index, int flags) {
+ const AstRawString* pattern, int literal_index, int flags) {
size_t pattern_entry = GetConstantPoolEntry(pattern);
OutputCreateRegExpLiteral(pattern_entry, literal_index, flags);
return *this;
@@ -707,6 +831,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
+ DCHECK(!label->is_bound());
OutputJump(label, 0);
return *this;
}
@@ -714,40 +839,47 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
// The peephole optimizer attempts to simplify JumpIfToBooleanTrue
// to JumpIfTrue.
+ DCHECK(!label->is_bound());
OutputJumpIfToBooleanTrue(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
+ DCHECK(!label->is_bound());
OutputJumpIfToBooleanFalse(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
+ DCHECK(!label->is_bound());
OutputJumpIfNull(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
BytecodeLabel* label) {
+ DCHECK(!label->is_bound());
OutputJumpIfUndefined(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
BytecodeLabel* label) {
+ DCHECK(!label->is_bound());
OutputJumpIfNotHole(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfJSReceiver(
BytecodeLabel* label) {
+ DCHECK(!label->is_bound());
OutputJumpIfJSReceiver(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
int loop_depth) {
+ DCHECK(label->is_bound());
OutputJumpLoop(label, 0, loop_depth);
return *this;
}
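// A minimal sketch of the invariant these DCHECKs encode (builder and label
// names illustrative): forward jumps must take a not-yet-bound label that
// gets patched later, while JumpLoop requires an already-bound, backward
// target:
//   BytecodeLabel loop_header;
//   builder.Bind(&loop_header);
//   // ... loop body ...
//   builder.JumpLoop(&loop_header, 0);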
@@ -891,10 +1023,22 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
- RegisterList args,
- int feedback_slot_id) {
- OutputNew(constructor, args, args.register_count(), feedback_slot_id);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallWithSpread(Register callable,
+ RegisterList args) {
+ OutputCallWithSpread(callable, args, args.register_count());
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Construct(Register constructor,
+ RegisterList args,
+ int feedback_slot_id) {
+ OutputConstruct(constructor, args, args.register_count(), feedback_slot_id);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ConstructWithSpread(
+ Register constructor, RegisterList args) {
+ OutputConstructWithSpread(constructor, args, args.register_count());
return *this;
}
@@ -947,11 +1091,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::NewWithSpread(RegisterList args) {
- OutputNewWithSpread(args, args.register_count());
- return *this;
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
if (language_mode == SLOPPY) {
@@ -963,17 +1102,34 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
return *this;
}
-size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
- return constant_array_builder()->Insert(object);
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(
+ const AstRawString* raw_string) {
+ return constant_array_builder()->Insert(raw_string);
+}
+
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(const AstValue* heap_number) {
+ DCHECK(heap_number->IsHeapNumber());
+ return constant_array_builder()->Insert(heap_number);
}
-size_t BytecodeArrayBuilder::AllocateConstantPoolEntry() {
- return constant_array_builder()->AllocateEntry();
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(const Scope* scope) {
+ return constant_array_builder()->Insert(scope);
+}
+
+#define ENTRY_GETTER(NAME, ...) \
+ size_t BytecodeArrayBuilder::NAME##ConstantPoolEntry() { \
+ return constant_array_builder()->Insert##NAME(); \
+ }
+SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_GETTER)
+#undef ENTRY_GETTER
+
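// Judging from the call sites in this patch (HomeObjectSymbol,
// IteratorSymbol, AsyncIteratorSymbol), each singleton entry type gets a
// stamped-out getter of the form:
//   size_t BytecodeArrayBuilder::IteratorSymbolConstantPoolEntry() {
//     return constant_array_builder()->InsertIteratorSymbol();
//   }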
+size_t BytecodeArrayBuilder::AllocateDeferredConstantPoolEntry() {
+ return constant_array_builder()->InsertDeferred();
}
-void BytecodeArrayBuilder::InsertConstantPoolEntryAt(size_t entry,
- Handle<Object> object) {
- constant_array_builder()->InsertAllocatedEntry(entry, object);
+void BytecodeArrayBuilder::SetDeferredConstantPoolEntry(size_t entry,
+ Handle<Object> object) {
+ constant_array_builder()->SetDeferredAt(entry, object);
}
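// Intended two-phase usage, mirrored by BytecodeGenerator later in this
// patch (object name illustrative): reserve the slot while emitting
// bytecode, then fill it during finalization once the heap object exists:
//   size_t entry = builder()->AllocateDeferredConstantPoolEntry();
//   // ... emit bytecode referencing |entry| ...
//   builder()->SetDeferredConstantPoolEntry(entry, declarations);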
void BytecodeArrayBuilder::SetReturnPosition() {
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 121b84d523..0a10c1f485 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -71,12 +71,15 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
return register_allocator()->maximum_register_count();
}
+ Register Local(int index) const;
Register Parameter(int parameter_index) const;
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadConstantPoolEntry(size_t entry);
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
- BytecodeArrayBuilder& LoadLiteral(Handle<Object> object);
+ BytecodeArrayBuilder& LoadLiteral(const AstRawString* raw_string);
+ BytecodeArrayBuilder& LoadLiteral(const Scope* scope);
+ BytecodeArrayBuilder& LoadLiteral(const AstValue* ast_value);
BytecodeArrayBuilder& LoadUndefined();
BytecodeArrayBuilder& LoadNull();
BytecodeArrayBuilder& LoadTheHole();
@@ -84,16 +87,17 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& LoadFalse();
// Global loads to the accumulator and stores from the accumulator.
- BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
+ BytecodeArrayBuilder& LoadGlobal(const AstRawString* name, int feedback_slot,
TypeofMode typeof_mode);
- BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
- int feedback_slot,
+ BytecodeArrayBuilder& StoreGlobal(const AstRawString* name, int feedback_slot,
LanguageMode language_mode);
// Load the object at |slot_index| at |depth| in the context chain starting
// with |context| into the accumulator.
+ enum ContextSlotMutability { kImmutableSlot, kMutableSlot };
BytecodeArrayBuilder& LoadContextSlot(Register context, int slot_index,
- int depth);
+ int depth,
+ ContextSlotMutability immutable);
// Stores the object in the accumulator into |slot_index| at |depth| in the
// context chain starting with |context|.
@@ -117,10 +121,16 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Named load property.
BytecodeArrayBuilder& LoadNamedProperty(Register object,
- const Handle<Name> name,
+ const AstRawString* name,
int feedback_slot);
// Keyed load property. The key should be in the accumulator.
BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
+ // Named load property of the @@iterator symbol.
+ BytecodeArrayBuilder& LoadIteratorProperty(Register object,
+ int feedback_slot);
+ // Named load property of the @@asyncIterator symbol.
+ BytecodeArrayBuilder& LoadAsyncIteratorProperty(Register object,
+ int feedback_slot);
// Store properties. Flag for NeedsSetFunctionName() should
// be in the accumulator.
@@ -128,35 +138,54 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
Register object, Register name, DataPropertyInLiteralFlags flags,
int feedback_slot);
- // Store properties. The value to be stored should be in the accumulator.
+ // Store a property named by a property name. The value to be stored should be
+ // in the accumulator.
+ BytecodeArrayBuilder& StoreNamedProperty(Register object,
+ const AstRawString* name,
+ int feedback_slot,
+ LanguageMode language_mode);
+ // Store a property named by a constant from the constant pool. The value to
+ // be stored should be in the accumulator.
BytecodeArrayBuilder& StoreNamedProperty(Register object,
- const Handle<Name> name,
+ size_t constant_pool_entry,
int feedback_slot,
LanguageMode language_mode);
+ // Store an own property named by a constant from the constant pool. The
+ // value to be stored should be in the accumulator.
+ BytecodeArrayBuilder& StoreNamedOwnProperty(Register object,
+ const AstRawString* name,
+ int feedback_slot);
+ // Store a property keyed by a value in a register. The value to be stored
+ // should be in the accumulator.
BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
int feedback_slot,
LanguageMode language_mode);
+ // Store the home object property. The value to be stored should be in the
+ // accumulator.
+ BytecodeArrayBuilder& StoreHomeObjectProperty(Register object,
+ int feedback_slot,
+ LanguageMode language_mode);
// Lookup the variable with |name|.
- BytecodeArrayBuilder& LoadLookupSlot(const Handle<String> name,
+ BytecodeArrayBuilder& LoadLookupSlot(const AstRawString* name,
TypeofMode typeof_mode);
// Lookup the variable with |name|, which is known to be at |slot_index| at
// |depth| in the context chain if not shadowed by a context extension
// somewhere in that context chain.
- BytecodeArrayBuilder& LoadLookupContextSlot(const Handle<String> name,
+ BytecodeArrayBuilder& LoadLookupContextSlot(const AstRawString* name,
TypeofMode typeof_mode,
int slot_index, int depth);
// Lookup the variable with |name|, which has its feedback in |feedback_slot|
// and is known to be global if not shadowed by a context extension somewhere
// up to |depth| in that context chain.
- BytecodeArrayBuilder& LoadLookupGlobalSlot(const Handle<String> name,
+ BytecodeArrayBuilder& LoadLookupGlobalSlot(const AstRawString* name,
TypeofMode typeof_mode,
int feedback_slot, int depth);
// Store value in the accumulator into the variable with |name|.
- BytecodeArrayBuilder& StoreLookupSlot(const Handle<String> name,
+ BytecodeArrayBuilder& StoreLookupSlot(const AstRawString* name,
LanguageMode language_mode);
// Create a new closure for a SharedFunctionInfo which will be inserted at
@@ -164,15 +193,15 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& CreateClosure(size_t shared_function_info_entry,
int slot, int flags);
- // Create a new local context for a |scope_info| and a closure which should be
+ // Create a new local context for a |scope| and a closure which should be
// in the accumulator.
- BytecodeArrayBuilder& CreateBlockContext(Handle<ScopeInfo> scope_info);
+ BytecodeArrayBuilder& CreateBlockContext(const Scope* scope);
// Create a new context for a catch block with |exception|, |name|,
- // |scope_info|, and the closure in the accumulator.
+ // |scope|, and the closure in the accumulator.
BytecodeArrayBuilder& CreateCatchContext(Register exception,
- Handle<String> name,
- Handle<ScopeInfo> scope_info);
+ const AstRawString* name,
+ const Scope* scope);
// Create a new context with size |slots|.
BytecodeArrayBuilder& CreateFunctionContext(int slots);
@@ -180,16 +209,15 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Create a new eval context with size |slots|.
BytecodeArrayBuilder& CreateEvalContext(int slots);
- // Creates a new context with the given |scope_info| for a with-statement
+ // Creates a new context with the given |scope| for a with-statement
// with the |object| in a register and the closure in the accumulator.
- BytecodeArrayBuilder& CreateWithContext(Register object,
- Handle<ScopeInfo> scope_info);
+ BytecodeArrayBuilder& CreateWithContext(Register object, const Scope* scope);
// Create a new arguments object in the accumulator.
BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
// Literals creation. Constant elements should be in the accumulator.
- BytecodeArrayBuilder& CreateRegExpLiteral(Handle<String> pattern,
+ BytecodeArrayBuilder& CreateRegExpLiteral(const AstRawString* pattern,
int literal_index, int flags);
BytecodeArrayBuilder& CreateArrayLiteral(size_t constant_elements_entry,
int literal_index, int flags);
@@ -213,10 +241,21 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
Call::CallType call_type,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
- // Call the new operator. The accumulator holds the |new_target|.
+ // Call a JS function. The JSFunction or Callable to be called should be in
+ // |callable|, the receiver in |args[0]| and the arguments in |args[1]|
+ // onwards. The final argument must be a spread.
+ BytecodeArrayBuilder& CallWithSpread(Register callable, RegisterList args);
+
+ // Call the Construct operator. The accumulator holds the |new_target|.
// The |constructor| is in a register and arguments are in |args|.
- BytecodeArrayBuilder& New(Register constructor, RegisterList args,
- int feedback_slot);
+ BytecodeArrayBuilder& Construct(Register constructor, RegisterList args,
+ int feedback_slot);
+
+ // Call the Construct operator for use with a spread. The accumulator holds
+ // the |new_target|. The |constructor| is in a register and arguments are in
+ // |args|. The final argument must be a spread.
+ BytecodeArrayBuilder& ConstructWithSpread(Register constructor,
+ RegisterList args);
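// A usage sketch (names illustrative): for `new C(...list)` the generator
// materializes the argument registers, loads new.target into the
// accumulator, and then emits
//   builder()->ConstructWithSpread(constructor, args);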
// Call the runtime function with |function_id| and arguments |args|.
BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
@@ -243,11 +282,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Call the JS runtime function with |context_index| and arguments |args|.
BytecodeArrayBuilder& CallJSRuntime(int context_index, RegisterList args);
- // Call the constructor in |args[0]| with new_target in |args[1]| and the
- // arguments starting at |args[2]| onwards. The final argument must be a
- // spread.
- BytecodeArrayBuilder& NewWithSpread(RegisterList args);
-
// Operators (register holds the lhs value, accumulator holds the rhs value).
// Type feedback will be recorded in the |feedback_slot|
BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
@@ -328,12 +362,18 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// entry, so that it can be referenced by above exception handling support.
int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
- // Gets a constant pool entry for the |object|.
- size_t GetConstantPoolEntry(Handle<Object> object);
- // Allocates a slot in the constant pool which can later be inserted.
- size_t AllocateConstantPoolEntry();
- // Inserts a entry into an allocated constant pool entry.
- void InsertConstantPoolEntryAt(size_t entry, Handle<Object> object);
+ // Gets a constant pool entry.
+ size_t GetConstantPoolEntry(const AstRawString* raw_string);
+ size_t GetConstantPoolEntry(const AstValue* heap_number);
+ size_t GetConstantPoolEntry(const Scope* scope);
+#define ENTRY_GETTER(NAME, ...) size_t NAME##ConstantPoolEntry();
+ SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_GETTER)
+#undef ENTRY_GETTER
+
+ // Allocates a slot in the constant pool which can later be set.
+ size_t AllocateDeferredConstantPoolEntry();
+ // Sets the deferred value into an allocated constant pool entry.
+ void SetDeferredConstantPoolEntry(size_t entry, Handle<Object> object);
void InitializeReturnPosition(FunctionLiteral* literal);
@@ -379,6 +419,10 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
OperandType... operand_types>
friend class BytecodeNodeBuilder;
+ const FeedbackVectorSpec* feedback_vector_spec() const {
+ return literal_->feedback_vector_spec();
+ }
+
// Returns the current source position for the given |bytecode|.
INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
@@ -421,6 +465,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
}
Zone* zone_;
+ FunctionLiteral* literal_;
bool bytecode_generated_;
ConstantArrayBuilder constant_array_builder_;
HandlerTableBuilder handler_table_builder_;
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index dd91564b16..225af0eb53 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -175,16 +175,19 @@ Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
int delta) {
Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+ DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+ DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
+ DCHECK_GT(delta, 0);
size_t operand_location = jump_location + 1;
DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
- if (Bytecodes::ScaleForSignedOperand(delta) == OperandScale::kSingle) {
- // The jump fits within the range of an Imm8 operand, so cancel
+ if (Bytecodes::ScaleForUnsignedOperand(delta) == OperandScale::kSingle) {
+ // The jump fits within the range of a UImm8 operand, so cancel
// the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
} else {
- // The jump does not fit within the range of an Imm8 operand, so
+ // The jump does not fit within the range of a UImm8 operand, so
// commit reservation putting the offset into the constant pool,
// and update the jump instruction and operand.
size_t entry = constant_array_builder()->CommitReservedEntry(
@@ -200,10 +203,13 @@ void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
int delta) {
Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+ DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+ DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
+ DCHECK_GT(delta, 0);
size_t operand_location = jump_location + 1;
uint8_t operand_bytes[2];
- if (Bytecodes::ScaleForSignedOperand(delta) <= OperandScale::kDouble) {
+ if (Bytecodes::ScaleForUnsignedOperand(delta) <= OperandScale::kDouble) {
// The jump fits within the range of a UImm16 operand, so cancel
// the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
@@ -282,15 +288,13 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
if (label->is_bound()) {
CHECK_GE(current_offset, label->offset());
- CHECK_LE(current_offset, static_cast<size_t>(kMaxInt));
+ CHECK_LE(current_offset, static_cast<size_t>(kMaxUInt32));
// Label has been bound already so this is a backwards jump.
- size_t abs_delta = current_offset - label->offset();
- int delta = -static_cast<int>(abs_delta);
- OperandScale operand_scale = Bytecodes::ScaleForSignedOperand(delta);
+ uint32_t delta = static_cast<uint32_t>(current_offset - label->offset());
+ OperandScale operand_scale = Bytecodes::ScaleForUnsignedOperand(delta);
if (operand_scale > OperandScale::kSingle) {
// Adjust for scaling byte prefix for wide jump offset.
- DCHECK_LE(delta, 0);
- delta -= 1;
+ delta += 1;
}
DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
node->update_operand0(delta);
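// Worked example (offsets illustrative): a JumpLoop emitted at offset 100
// against a label bound at offset 40 gets delta = 100 - 40 = 60; had the
// delta required a wide operand, it would grow by one more to cover the
// scaling prefix byte emitted before the jump bytecode.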
diff --git a/deps/v8/src/interpreter/bytecode-decoder.cc b/deps/v8/src/interpreter/bytecode-decoder.cc
index 49751897ee..f003969d4b 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.cc
+++ b/deps/v8/src/interpreter/bytecode-decoder.cc
@@ -6,7 +6,7 @@
#include <iomanip>
-#include "src/utils.h"
+#include "src/interpreter/interpreter-intrinsics.h"
namespace v8 {
namespace internal {
@@ -67,6 +67,23 @@ uint32_t BytecodeDecoder::DecodeUnsignedOperand(const uint8_t* operand_start,
return 0;
}
+namespace {
+const char* NameForRuntimeId(uint32_t idx) {
+ switch (idx) {
+#define CASE(name, nargs, ressize) \
+ case Runtime::k##name: \
+ return #name; \
+ case Runtime::kInline##name: \
+ return #name;
+ FOR_EACH_INTRINSIC(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+} // anonymous namespace
+
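// With this helper, kRuntimeId and kIntrinsicId operands decode to readable
// names, e.g. a CallRuntime operand prints as [GetModuleNamespace] rather
// than a bare numeric index (example name illustrative).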
// static
std::ostream& BytecodeDecoder::Decode(std::ostream& os,
const uint8_t* bytecode_start,
@@ -112,12 +129,21 @@ std::ostream& BytecodeDecoder::Decode(std::ostream& os,
switch (op_type) {
case interpreter::OperandType::kIdx:
case interpreter::OperandType::kUImm:
- case interpreter::OperandType::kRuntimeId:
- case interpreter::OperandType::kIntrinsicId:
os << "["
<< DecodeUnsignedOperand(operand_start, op_type, operand_scale)
<< "]";
break;
+ case interpreter::OperandType::kIntrinsicId: {
+ auto id = static_cast<IntrinsicsHelper::IntrinsicId>(
+ DecodeUnsignedOperand(operand_start, op_type, operand_scale));
+ os << "[" << NameForRuntimeId(IntrinsicsHelper::ToRuntimeId(id)) << "]";
+ break;
+ }
+ case interpreter::OperandType::kRuntimeId:
+ os << "[" << NameForRuntimeId(DecodeUnsignedOperand(
+ operand_start, op_type, operand_scale))
+ << "]";
+ break;
case interpreter::OperandType::kImm:
os << "[" << DecodeSignedOperand(operand_start, op_type, operand_scale)
<< "]";
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 31ac88c1f7..57277c8a33 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins-constructor.h"
#include "src/code-stubs.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index be59a218fc..0310509fde 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -14,7 +14,7 @@
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
@@ -35,6 +35,7 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
register_(Register::current_context()),
depth_(0),
should_pop_context_(should_pop_context) {
+ DCHECK(scope->NeedsContext() || outer_ == nullptr);
if (outer_) {
depth_ = outer_->depth_ + 1;
@@ -75,7 +76,6 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
return previous;
}
- Scope* scope() const { return scope_; }
Register reg() const { return register_; }
bool ShouldPopContext() { return should_pop_context_; }
@@ -511,13 +511,14 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
constant_pool_entry_(0),
has_constant_pool_entry_(false) {}
- void AddFunctionDeclaration(Handle<String> name, FeedbackVectorSlot slot,
+ void AddFunctionDeclaration(const AstRawString* name, FeedbackSlot slot,
+ FeedbackSlot literal_slot,
FunctionLiteral* func) {
DCHECK(!slot.IsInvalid());
- declarations_.push_back(Declaration(name, slot, func));
+ declarations_.push_back(Declaration(name, slot, literal_slot, func));
}
- void AddUndefinedDeclaration(Handle<String> name, FeedbackVectorSlot slot) {
+ void AddUndefinedDeclaration(const AstRawString* name, FeedbackSlot slot) {
DCHECK(!slot.IsInvalid());
declarations_.push_back(Declaration(name, slot, nullptr));
}
@@ -526,7 +527,7 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
DCHECK(has_constant_pool_entry_);
int array_index = 0;
Handle<FixedArray> data = info->isolate()->factory()->NewFixedArray(
- static_cast<int>(declarations_.size() * 3), TENURED);
+ static_cast<int>(declarations_.size() * 4), TENURED);
for (const Declaration& declaration : declarations_) {
FunctionLiteral* func = declaration.func;
Handle<Object> initial_value;
@@ -541,8 +542,16 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
// will set stack overflow.
if (initial_value.is_null()) return Handle<FixedArray>();
- data->set(array_index++, *declaration.name);
+ data->set(array_index++, *declaration.name->string());
data->set(array_index++, Smi::FromInt(declaration.slot.ToInt()));
+ Object* undefined_or_literal_slot;
+ if (declaration.literal_slot.IsInvalid()) {
+ undefined_or_literal_slot = info->isolate()->heap()->undefined_value();
+ } else {
+ undefined_or_literal_slot =
+ Smi::FromInt(declaration.literal_slot.ToInt());
+ }
+ data->set(array_index++, undefined_or_literal_slot);
data->set(array_index++, *initial_value);
}
return data;
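// Resulting per-declaration layout, four consecutive FixedArray slots:
//   [i + 0] name (String)
//   [i + 1] feedback slot (Smi)
//   [i + 2] literal feedback slot (Smi) or undefined
//   [i + 3] initial value (undefined, or presumably the declared function's
//           shared function info)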
@@ -564,13 +573,20 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
private:
struct Declaration {
- Declaration() : slot(FeedbackVectorSlot::Invalid()), func(nullptr) {}
- Declaration(Handle<String> name, FeedbackVectorSlot slot,
+ Declaration() : slot(FeedbackSlot::Invalid()), func(nullptr) {}
+ Declaration(const AstRawString* name, FeedbackSlot slot,
+ FeedbackSlot literal_slot, FunctionLiteral* func)
+ : name(name), slot(slot), literal_slot(literal_slot), func(func) {}
+ Declaration(const AstRawString* name, FeedbackSlot slot,
FunctionLiteral* func)
- : name(name), slot(slot), func(func) {}
-
- Handle<String> name;
- FeedbackVectorSlot slot;
+ : name(name),
+ slot(slot),
+ literal_slot(FeedbackSlot::Invalid()),
+ func(func) {}
+
+ const AstRawString* name;
+ FeedbackSlot slot;
+ FeedbackSlot literal_slot;
FunctionLiteral* func;
};
ZoneVector<Declaration> declarations_;
@@ -578,6 +594,25 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
bool has_constant_pool_entry_;
};
+class BytecodeGenerator::CurrentScope final {
+ public:
+ CurrentScope(BytecodeGenerator* generator, Scope* scope)
+ : generator_(generator), outer_scope_(generator->current_scope()) {
+ if (scope != nullptr) {
+ generator_->set_current_scope(scope);
+ }
+ }
+ ~CurrentScope() {
+ if (outer_scope_ != generator_->current_scope()) {
+ generator_->set_current_scope(outer_scope_);
+ }
+ }
+
+ private:
+ BytecodeGenerator* generator_;
+ Scope* outer_scope_;
+};
+
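// Usage pattern (see VisitBlock below): narrow the generator's current
// scope for the duration of a visit and let the destructor restore the
// outer scope; passing a null scope leaves the current scope untouched:
//   CurrentScope current_scope(this, stmt->scope());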
BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
: zone_(info->zone()),
builder_(new (zone()) BytecodeArrayBuilder(
@@ -586,7 +621,8 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
info->scope()->num_stack_slots(), info->literal(),
info->SourcePositionRecordingMode())),
info_(info),
- scope_(info->scope()),
+ closure_scope_(info->scope()),
+ current_scope_(info->scope()),
globals_builder_(new (zone()) GlobalDeclarationsBuilder(info->zone())),
global_declarations_(0, info->zone()),
function_literals_(0, info->zone()),
@@ -599,12 +635,12 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
generator_resume_points_(info->literal()->yield_count(), info->zone()),
generator_state_(),
loop_depth_(0),
- home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
- iterator_symbol_(info->isolate()->factory()->iterator_symbol()),
- prototype_string_(info->isolate()->factory()->prototype_string()),
- empty_fixed_array_(info->isolate()->factory()->empty_fixed_array()),
+ prototype_string_(
+ info->isolate()->ast_string_constants()->prototype_string()),
undefined_string_(
- info->isolate()->ast_string_constants()->undefined_string()) {}
+ info->isolate()->ast_string_constants()->undefined_string()) {
+ DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
+}
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
AllocateDeferredConstants(isolate);
@@ -618,8 +654,8 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
Handle<FixedArray> declarations =
globals_builder->AllocateDeclarations(info());
if (declarations.is_null()) return SetStackOverflow();
- builder()->InsertConstantPoolEntryAt(globals_builder->constant_pool_entry(),
- declarations);
+ builder()->SetDeferredConstantPoolEntry(
+ globals_builder->constant_pool_entry(), declarations);
}
// Find or build shared function infos.
@@ -628,7 +664,7 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
Handle<SharedFunctionInfo> shared_info =
Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
if (shared_info.is_null()) return SetStackOverflow();
- builder()->InsertConstantPoolEntryAt(literal.second, shared_info);
+ builder()->SetDeferredConstantPoolEntry(literal.second, shared_info);
}
// Find or build shared function infos for the native function templates.
@@ -639,7 +675,7 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
Compiler::GetSharedFunctionInfoForNative(expr->extension(),
expr->name());
if (shared_info.is_null()) return SetStackOverflow();
- builder()->InsertConstantPoolEntryAt(literal.second, shared_info);
+ builder()->SetDeferredConstantPoolEntry(literal.second, shared_info);
}
// Build object literal constant properties
@@ -648,10 +684,11 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
if (object_literal->properties_count() > 0) {
// If constant properties is an empty fixed array, we've already added it
// to the constant pool when visiting the object literal.
- Handle<FixedArray> constant_properties =
+ Handle<BoilerplateDescription> constant_properties =
object_literal->GetOrBuildConstantProperties(isolate);
- builder()->InsertConstantPoolEntryAt(literal.second, constant_properties);
+ builder()->SetDeferredConstantPoolEntry(literal.second,
+ constant_properties);
}
}
@@ -660,7 +697,7 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
ArrayLiteral* array_literal = literal.first;
Handle<ConstantElementsPair> constant_elements =
array_literal->GetOrBuildConstantElements(isolate);
- builder()->InsertConstantPoolEntryAt(literal.second, constant_elements);
+ builder()->SetDeferredConstantPoolEntry(literal.second, constant_elements);
}
}
@@ -672,7 +709,7 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
InitializeAstVisitor(stack_limit);
// Initialize the incoming context.
- ContextScope incoming_context(this, scope(), false);
+ ContextScope incoming_context(this, closure_scope(), false);
// Initialize control scope.
ControlScopeForTopLevel control(this);
@@ -684,10 +721,10 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
VisitGeneratorPrologue();
}
- if (scope()->NeedsContext()) {
+ if (closure_scope()->NeedsContext()) {
// Push a new inner context scope for the function.
BuildNewLocalActivationContext();
- ContextScope local_function_context(this, scope(), false);
+ ContextScope local_function_context(this, closure_scope(), false);
BuildLocalActivationContextInitialization();
GenerateBytecodeBody();
} else {
@@ -713,23 +750,23 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
void BytecodeGenerator::GenerateBytecodeBody() {
// Build the arguments object if it is used.
- VisitArgumentsObject(scope()->arguments());
+ VisitArgumentsObject(closure_scope()->arguments());
// Build rest arguments array if it is used.
- Variable* rest_parameter = scope()->rest_parameter();
+ Variable* rest_parameter = closure_scope()->rest_parameter();
VisitRestArgumentsArray(rest_parameter);
// Build assignment to {.this_function} variable if it is used.
- VisitThisFunctionVariable(scope()->this_function_var());
+ VisitThisFunctionVariable(closure_scope()->this_function_var());
// Build assignment to {new.target} variable if it is used.
- VisitNewTargetVariable(scope()->new_target_var());
+ VisitNewTargetVariable(closure_scope()->new_target_var());
// Emit tracing call if requested to do so.
if (FLAG_trace) builder()->CallRuntime(Runtime::kTraceEnter);
// Visit declarations within the function scope.
- VisitDeclarations(scope()->declarations());
+ VisitDeclarations(closure_scope()->declarations());
// Emit initializing assignments for module namespace imports (if any).
VisitModuleNamespaceImports();
@@ -824,6 +861,7 @@ void BytecodeGenerator::VisitGeneratorPrologue() {
void BytecodeGenerator::VisitBlock(Block* stmt) {
// Visit declarations and statements.
+ CurrentScope current_scope(this, stmt->scope());
if (stmt->scope() != nullptr && stmt->scope()->NeedsContext()) {
BuildNewLocalBlockContext(stmt->scope());
ContextScope scope(this, stmt->scope());
@@ -848,13 +886,13 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
- FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
- globals_builder()->AddUndefinedDeclaration(variable->name(), slot);
+ FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
+ globals_builder()->AddUndefinedDeclaration(variable->raw_name(), slot);
break;
}
case VariableLocation::LOCAL:
if (variable->binding_needs_init()) {
- Register destination(variable->index());
+ Register destination(builder()->Local(variable->index()));
builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
}
break;
@@ -880,7 +918,7 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
Register name = register_allocator()->NewRegister();
builder()
- ->LoadLiteral(variable->name())
+ ->LoadLiteral(variable->raw_name())
.StoreAccumulatorInRegister(name)
.CallRuntime(Runtime::kDeclareEvalVar, name);
break;
@@ -888,8 +926,7 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
case VariableLocation::MODULE:
if (variable->IsExport() && variable->binding_needs_init()) {
builder()->LoadTheHole();
- BuildVariableAssignment(variable, Token::INIT,
- FeedbackVectorSlot::Invalid(),
+ BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
HoleCheckMode::kElided);
}
// Nothing to do for imports.
@@ -902,16 +939,16 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK(variable->mode() == LET || variable->mode() == VAR);
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
- FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
- globals_builder()->AddFunctionDeclaration(variable->name(), slot,
- decl->fun());
+ FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
+ globals_builder()->AddFunctionDeclaration(
+ variable->raw_name(), slot, decl->fun()->LiteralFeedbackSlot(),
+ decl->fun());
break;
}
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
VisitForAccumulatorValue(decl->fun());
- BuildVariableAssignment(variable, Token::INIT,
- FeedbackVectorSlot::Invalid(),
+ BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
HoleCheckMode::kElided);
break;
}
@@ -925,7 +962,7 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
case VariableLocation::LOOKUP: {
RegisterList args = register_allocator()->NewRegisterList(2);
builder()
- ->LoadLiteral(variable->name())
+ ->LoadLiteral(variable->raw_name())
.StoreAccumulatorInRegister(args[0]);
VisitForAccumulatorValue(decl->fun());
builder()->StoreAccumulatorInRegister(args[1]).CallRuntime(
@@ -936,28 +973,27 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK_EQ(variable->mode(), LET);
DCHECK(variable->IsExport());
VisitForAccumulatorValue(decl->fun());
- BuildVariableAssignment(variable, Token::INIT,
- FeedbackVectorSlot::Invalid(),
+ BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
HoleCheckMode::kElided);
break;
}
}
void BytecodeGenerator::VisitModuleNamespaceImports() {
- if (!scope()->is_module_scope()) return;
+ if (!closure_scope()->is_module_scope()) return;
RegisterAllocationScope register_scope(this);
Register module_request = register_allocator()->NewRegister();
- ModuleDescriptor* descriptor = scope()->AsModuleScope()->module();
+ ModuleDescriptor* descriptor = closure_scope()->AsModuleScope()->module();
for (auto entry : descriptor->namespace_imports()) {
builder()
->LoadLiteral(Smi::FromInt(entry->module_request))
.StoreAccumulatorInRegister(module_request)
.CallRuntime(Runtime::kGetModuleNamespace, module_request);
- Variable* var = scope()->LookupLocal(entry->local_name);
+ Variable* var = closure_scope()->LookupLocal(entry->local_name);
DCHECK_NOT_NULL(var);
- BuildVariableAssignment(var, Token::INIT, FeedbackVectorSlot::Invalid(),
+ BuildVariableAssignment(var, Token::INIT, FeedbackSlot::Invalid(),
HoleCheckMode::kElided);
}
}
@@ -972,7 +1008,7 @@ void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
if (globals_builder()->empty()) return;
globals_builder()->set_constant_pool_entry(
- builder()->AllocateConstantPoolEntry());
+ builder()->AllocateDeferredConstantPoolEntry());
int encoded_flags = info()->GetDeclareGlobalsFlags();
// Emit code to declare globals.
@@ -1206,7 +1242,7 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
}
void BytecodeGenerator::VisitForInAssignment(Expression* expr,
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
DCHECK(expr->IsValidReferenceExpression());
// Evaluate assignment starting with the value to be stored in the
@@ -1225,7 +1261,8 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
Register object = VisitForRegisterValue(property->obj());
- Handle<String> name = property->key()->AsLiteral()->AsPropertyName();
+ const AstRawString* name =
+ property->key()->AsLiteral()->AsRawPropertyName();
builder()->LoadAccumulatorWithRegister(value);
builder()->StoreNamedProperty(object, name, feedback_index(slot),
language_mode());
@@ -1251,7 +1288,7 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
VisitForRegisterValue(super_property->this_var(), args[0]);
VisitForRegisterValue(super_property->home_object(), args[1]);
builder()
- ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
.StoreAccumulatorInRegister(args[2])
.CallRuntime(StoreToSuperRuntimeId(), args);
break;
@@ -1304,7 +1341,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->SetExpressionAsStatementPosition(stmt->each());
builder()->ForInContinue(index, cache_length);
loop_builder.BreakIfFalse();
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
builder()->ForInNext(receiver, index, triple.Truncate(2),
feedback_index(slot));
loop_builder.ContinueIfUndefined();
@@ -1439,9 +1476,9 @@ void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- uint8_t flags = CreateClosureFlags::Encode(expr->pretenure(),
- scope()->is_function_scope());
- size_t entry = builder()->AllocateConstantPoolEntry();
+ uint8_t flags = CreateClosureFlags::Encode(
+ expr->pretenure(), closure_scope()->is_function_scope());
+ size_t entry = builder()->AllocateDeferredConstantPoolEntry();
int slot_index = feedback_index(expr->LiteralFeedbackSlot());
builder()->CreateClosure(entry, slot_index, flags);
function_literals_.push_back(std::make_pair(expr, entry));
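
AllocateDeferredConstantPoolEntry() reserves an index whose value does not exist yet; the (expr, entry) pairs pushed onto function_literals_ are filled in once the corresponding heap object is available, matching the Entry::IsDeferred() handling in constant-array-builder.cc further down. A self-contained sketch of the reserve-then-fill idea, with stand-in names:

#include <cstddef>
#include <vector>

using ObjectRef = const void*;  // Stand-in for Handle<Object>.

class SketchConstantPool {
 public:
  // Hand out an index before the value exists (e.g. a SharedFunctionInfo
  // that is only created after bytecode generation finishes).
  size_t AllocateDeferred() {
    entries_.push_back({true, nullptr});
    return entries_.size() - 1;
  }
  // Fill the reserved slot once the value has been materialized.
  void SetDeferred(size_t index, ObjectRef value) {
    entries_[index] = {false, value};
  }
  // The final array build can assert that nothing is still pending.
  bool AllResolved() const {
    for (const Entry& entry : entries_) {
      if (entry.deferred) return false;
    }
    return true;
  }

 private:
  struct Entry {
    bool deferred;
    ObjectRef value;
  };
  std::vector<Entry> entries_;
};
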
@@ -1467,9 +1504,8 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
if (FunctionLiteral::NeedsHomeObject(expr->constructor())) {
// Prototype is already in the accumulator.
- builder()->StoreNamedProperty(constructor, home_object_symbol(),
- feedback_index(expr->HomeObjectSlot()),
- language_mode());
+ builder()->StoreHomeObjectProperty(
+ constructor, feedback_index(expr->HomeObjectSlot()), language_mode());
}
VisitClassLiteralProperties(expr, constructor, prototype);
@@ -1478,9 +1514,8 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
VariableProxy* proxy = expr->class_variable_proxy();
- FeedbackVectorSlot slot = expr->NeedsProxySlot()
- ? expr->ProxySlot()
- : FeedbackVectorSlot::Invalid();
+ FeedbackSlot slot =
+ expr->NeedsProxySlot() ? expr->ProxySlot() : FeedbackSlot::Invalid();
BuildVariableAssignment(proxy->var(), Token::INIT, slot,
HoleCheckMode::kElided);
}
@@ -1545,7 +1580,7 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
flags |= DataPropertyInLiteralFlag::kSetFunctionName;
}
- FeedbackVectorSlot slot = property->GetStoreDataPropertySlot();
+ FeedbackSlot slot = property->GetStoreDataPropertySlot();
DCHECK(!slot.IsInvalid());
builder()
@@ -1584,7 +1619,7 @@ void BytecodeGenerator::BuildClassLiteralNameProperty(ClassLiteral* expr,
void BytecodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
- size_t entry = builder()->AllocateConstantPoolEntry();
+ size_t entry = builder()->AllocateDeferredConstantPoolEntry();
int slot_index = feedback_index(expr->LiteralFeedbackSlot());
builder()->CreateClosure(entry, slot_index, NOT_TENURED);
native_function_literals_.push_back(std::make_pair(expr, entry));
@@ -1622,28 +1657,14 @@ void BytecodeGenerator::VisitConditional(Conditional* expr) {
void BytecodeGenerator::VisitLiteral(Literal* expr) {
if (!execution_result()->IsEffect()) {
const AstValue* raw_value = expr->raw_value();
- if (raw_value->IsSmi()) {
- builder()->LoadLiteral(raw_value->AsSmi());
- } else if (raw_value->IsUndefined()) {
- builder()->LoadUndefined();
- } else if (raw_value->IsTrue()) {
- builder()->LoadTrue();
- } else if (raw_value->IsFalse()) {
- builder()->LoadFalse();
- } else if (raw_value->IsNull()) {
- builder()->LoadNull();
- } else if (raw_value->IsTheHole()) {
- builder()->LoadTheHole();
- } else {
- builder()->LoadLiteral(raw_value->value());
- }
+ builder()->LoadLiteral(raw_value);
}
}
void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// Materialize a regular expression literal.
- builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
- expr->flags());
+ builder()->CreateRegExpLiteral(
+ expr->raw_pattern(), feedback_index(expr->literal_slot()), expr->flags());
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
@@ -1656,15 +1677,16 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Register literal = register_allocator()->NewRegister();
size_t entry;
- // If constant properties is an empty fixed array, use our cached
- // empty_fixed_array to ensure it's only added to the constant pool once.
+ // If constant properties is an empty fixed array, use a cached empty fixed
+ // array to ensure it's only added to the constant pool once.
if (expr->properties_count() == 0) {
- entry = builder()->GetConstantPoolEntry(empty_fixed_array());
+ entry = builder()->EmptyFixedArrayConstantPoolEntry();
} else {
- entry = builder()->AllocateConstantPoolEntry();
+ entry = builder()->AllocateDeferredConstantPoolEntry();
object_literals_.push_back(std::make_pair(expr, entry));
}
- builder()->CreateObjectLiteral(entry, expr->literal_index(), flags, literal);
+ builder()->CreateObjectLiteral(entry, feedback_index(expr->literal_slot()),
+ flags, literal);
// Store computed values into the literal.
int property_index = 0;
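
EmptyFixedArrayConstantPoolEntry() draws on the new cached singleton entries (see the SINGLETON_CONSTANT_ENTRY_TYPES initializers in constant-array-builder.cc below): the first request allocates a slot, and later requests reuse the same index. A minimal sketch of that caching, assuming -1 marks an unallocated slot:

#include <cstddef>

class SingletonEntryCache {
 public:
  // -1 mirrors the INIT_SINGLETON_ENTRY_FIELD(-1) initializers below: the
  // first request allocates a pool slot, later requests reuse the index.
  size_t GetOrAllocate(size_t* next_free_index) {
    if (index_ < 0) {
      index_ = static_cast<std::ptrdiff_t>((*next_free_index)++);
    }
    return static_cast<size_t>(index_);
  }

 private:
  std::ptrdiff_t index_ = -1;
};
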
@@ -1694,14 +1716,14 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
RegisterAllocationScope register_scope(this);
Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
- builder()->StoreNamedProperty(
- literal, key->AsPropertyName(),
- feedback_index(property->GetSlot(0)), language_mode());
+ builder()->StoreNamedOwnProperty(
+ literal, key->AsRawPropertyName(),
+ feedback_index(property->GetSlot(0)));
VisitSetHomeObject(value, literal, property, 1);
} else {
- builder()->StoreNamedProperty(
- literal, key->AsPropertyName(),
- feedback_index(property->GetSlot(0)), language_mode());
+ builder()->StoreNamedOwnProperty(
+ literal, key->AsRawPropertyName(),
+ feedback_index(property->GetSlot(0)));
}
} else {
VisitForEffect(property->value());
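
StoreNamedOwnProperty, backed by the StaNamedOwnProperty bytecode added in bytecodes.h below, presumably uses define rather than set semantics, which would explain why it no longer takes a language mode: literal properties are created directly on the freshly allocated object and cannot hit an accessor on its prototype. A toy contrast of the two store kinds:

#include <map>
#include <string>

// DefineOwn writes the receiver directly, which is what an own-property
// store encodes; a full "set" would first search the chain for accessors.
struct ToyObject {
  std::map<std::string, int> own_properties;
  ToyObject* prototype = nullptr;  // Never consulted by DefineOwn.
};

void DefineOwn(ToyObject& receiver, const std::string& key, int value) {
  receiver.own_properties[key] = value;
}
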
@@ -1799,7 +1821,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
data_property_flags |= DataPropertyInLiteralFlag::kSetFunctionName;
}
- FeedbackVectorSlot slot = property->GetStoreDataPropertySlot();
+ FeedbackSlot slot = property->GetStoreDataPropertySlot();
DCHECK(!slot.IsInvalid());
builder()
@@ -1847,8 +1869,9 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
uint8_t flags = CreateArrayLiteralFlags::Encode(
expr->IsFastCloningSupported(), expr->ComputeFlags());
- size_t entry = builder()->AllocateConstantPoolEntry();
- builder()->CreateArrayLiteral(entry, expr->literal_index(), flags);
+ size_t entry = builder()->AllocateDeferredConstantPoolEntry();
+ builder()->CreateArrayLiteral(entry, feedback_index(expr->literal_slot()),
+ flags);
array_literals_.push_back(std::make_pair(expr, entry));
Register index, literal;
@@ -1869,7 +1892,7 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
literal_in_accumulator = false;
}
- FeedbackVectorSlot slot = expr->LiteralFeedbackSlot();
+ FeedbackSlot slot = expr->LiteralFeedbackSlot();
builder()
->LoadLiteral(Smi::FromInt(array_index))
.StoreAccumulatorInRegister(index);
@@ -1890,19 +1913,18 @@ void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
proxy->hole_check_mode());
}
-void BytecodeGenerator::BuildVariableLoad(Variable* variable,
- FeedbackVectorSlot slot,
+void BytecodeGenerator::BuildVariableLoad(Variable* variable, FeedbackSlot slot,
HoleCheckMode hole_check_mode,
TypeofMode typeof_mode) {
switch (variable->location()) {
case VariableLocation::LOCAL: {
- Register source(Register(variable->index()));
+ Register source(builder()->Local(variable->index()));
// We need to load the variable into the accumulator, even when in a
// VisitForRegisterScope, in order to avoid register aliasing if
// subsequent expressions assign to the same variable.
builder()->LoadAccumulatorWithRegister(source);
if (hole_check_mode == HoleCheckMode::kRequired) {
- BuildThrowIfHole(variable->name());
+ BuildThrowIfHole(variable->raw_name());
}
break;
}
@@ -1915,7 +1937,7 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
// subsequent expressions assign to the same variable.
builder()->LoadAccumulatorWithRegister(source);
if (hole_check_mode == HoleCheckMode::kRequired) {
- BuildThrowIfHole(variable->name());
+ BuildThrowIfHole(variable->raw_name());
}
break;
}
@@ -1926,7 +1948,7 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
if (variable->raw_name() == undefined_string()) {
builder()->LoadUndefined();
} else {
- builder()->LoadGlobal(variable->name(), feedback_index(slot),
+ builder()->LoadGlobal(variable->raw_name(), feedback_index(slot),
typeof_mode);
}
break;
@@ -1942,9 +1964,15 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
context_reg = execution_context()->reg();
}
- builder()->LoadContextSlot(context_reg, variable->index(), depth);
+ BytecodeArrayBuilder::ContextSlotMutability immutable =
+ (variable->maybe_assigned() == kNotAssigned)
+ ? BytecodeArrayBuilder::kImmutableSlot
+ : BytecodeArrayBuilder::kMutableSlot;
+
+ builder()->LoadContextSlot(context_reg, variable->index(), depth,
+ immutable);
if (hole_check_mode == HoleCheckMode::kRequired) {
- BuildThrowIfHole(variable->name());
+ BuildThrowIfHole(variable->raw_name());
}
break;
}
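
The new ContextSlotMutability argument lets this load pick LdaImmutableContextSlot (added in bytecodes.h below) whenever the parser proved the variable is never reassigned; downstream consumers can presumably treat such loads as repeatable without re-reading the context. The classification in isolation, as a sketch:

// kNotAssigned is the parser's maybe_assigned() verdict; anything weaker
// than "provably never reassigned" must stay on the mutable load.
enum class SlotMutability { kImmutableSlot, kMutableSlot };

inline SlotMutability ClassifyContextSlot(bool maybe_assigned) {
  return maybe_assigned ? SlotMutability::kMutableSlot
                        : SlotMutability::kImmutableSlot;
}
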
@@ -1954,21 +1982,22 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
Variable* local_variable = variable->local_if_not_shadowed();
int depth =
execution_context()->ContextChainDepth(local_variable->scope());
- builder()->LoadLookupContextSlot(variable->name(), typeof_mode,
+ builder()->LoadLookupContextSlot(variable->raw_name(), typeof_mode,
local_variable->index(), depth);
if (hole_check_mode == HoleCheckMode::kRequired) {
- BuildThrowIfHole(variable->name());
+ BuildThrowIfHole(variable->raw_name());
}
break;
}
case DYNAMIC_GLOBAL: {
- int depth = scope()->ContextChainLengthUntilOutermostSloppyEval();
- builder()->LoadLookupGlobalSlot(variable->name(), typeof_mode,
+ int depth =
+ closure_scope()->ContextChainLengthUntilOutermostSloppyEval();
+ builder()->LoadLookupGlobalSlot(variable->raw_name(), typeof_mode,
feedback_index(slot), depth);
break;
}
default:
- builder()->LoadLookupSlot(variable->name(), typeof_mode);
+ builder()->LoadLookupSlot(variable->raw_name(), typeof_mode);
}
break;
}
@@ -1976,7 +2005,7 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
int depth = execution_context()->ContextChainDepth(variable->scope());
builder()->LoadModuleVariable(variable->index(), depth);
if (hole_check_mode == HoleCheckMode::kRequired) {
- BuildThrowIfHole(variable->name());
+ BuildThrowIfHole(variable->raw_name());
}
break;
}
@@ -1984,7 +2013,7 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
}
void BytecodeGenerator::BuildVariableLoadForAccumulatorValue(
- Variable* variable, FeedbackVectorSlot slot, HoleCheckMode hole_check_mode,
+ Variable* variable, FeedbackSlot slot, HoleCheckMode hole_check_mode,
TypeofMode typeof_mode) {
ValueResultScope accumulator_result(this);
BuildVariableLoad(variable, slot, hole_check_mode, typeof_mode);
@@ -2010,9 +2039,9 @@ void BytecodeGenerator::BuildAsyncReturn() {
Register return_value = args[2];
builder()->StoreAccumulatorInRegister(return_value);
- Variable* var_promise = scope()->promise_var();
+ Variable* var_promise = closure_scope()->promise_var();
DCHECK_NOT_NULL(var_promise);
- BuildVariableLoad(var_promise, FeedbackVectorSlot::Invalid(),
+ BuildVariableLoad(var_promise, FeedbackSlot::Invalid(),
HoleCheckMode::kElided);
builder()
->StoreAccumulatorInRegister(promise)
@@ -2034,14 +2063,14 @@ void BytecodeGenerator::BuildAbort(BailoutReason bailout_reason) {
.CallRuntime(Runtime::kAbort, reason);
}
-void BytecodeGenerator::BuildThrowReferenceError(Handle<String> name) {
+void BytecodeGenerator::BuildThrowReferenceError(const AstRawString* name) {
RegisterAllocationScope register_scope(this);
Register name_reg = register_allocator()->NewRegister();
builder()->LoadLiteral(name).StoreAccumulatorInRegister(name_reg).CallRuntime(
Runtime::kThrowReferenceError, name_reg);
}
-void BytecodeGenerator::BuildThrowIfHole(Handle<String> name) {
+void BytecodeGenerator::BuildThrowIfHole(const AstRawString* name) {
// TODO(interpreter): Can the parser reduce the number of checks
  // performed? Or should there be a ThrowIfHole bytecode?
BytecodeLabel no_reference_error;
@@ -2067,13 +2096,13 @@ void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
// Perform an initialization check for let/const declared variables.
// E.g. let x = (x = 20); is not allowed.
DCHECK(IsLexicalVariableMode(variable->mode()));
- BuildThrowIfHole(variable->name());
+ BuildThrowIfHole(variable->raw_name());
}
}
void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode) {
VariableMode mode = variable->mode();
RegisterAllocationScope assignment_register_scope(this);
@@ -2083,9 +2112,9 @@ void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
case VariableLocation::LOCAL: {
Register destination;
if (VariableLocation::PARAMETER == variable->location()) {
- destination = Register(builder()->Parameter(variable->index() + 1));
+ destination = builder()->Parameter(variable->index() + 1);
} else {
- destination = Register(variable->index());
+ destination = builder()->Local(variable->index());
}
if (hole_check_mode == HoleCheckMode::kRequired) {
@@ -2107,7 +2136,7 @@ void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
break;
}
case VariableLocation::UNALLOCATED: {
- builder()->StoreGlobal(variable->name(), feedback_index(slot),
+ builder()->StoreGlobal(variable->raw_name(), feedback_index(slot),
language_mode());
break;
}
@@ -2128,7 +2157,8 @@ void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
Register value_temp = register_allocator()->NewRegister();
builder()
->StoreAccumulatorInRegister(value_temp)
- .LoadContextSlot(context_reg, variable->index(), depth);
+ .LoadContextSlot(context_reg, variable->index(), depth,
+ BytecodeArrayBuilder::kMutableSlot);
BuildHoleCheckForVariableAssignment(variable, op);
builder()->LoadAccumulatorWithRegister(value_temp);
@@ -2142,7 +2172,7 @@ void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
break;
}
case VariableLocation::LOOKUP: {
- builder()->StoreLookupSlot(variable->name(), language_mode());
+ builder()->StoreLookupSlot(variable->raw_name(), language_mode());
break;
}
case VariableLocation::MODULE: {
@@ -2177,7 +2207,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Register object, key;
RegisterList super_property_args;
- Handle<String> name;
+ const AstRawString* name;
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
@@ -2190,7 +2220,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY: {
object = VisitForRegisterValue(property->obj());
- name = property->key()->AsLiteral()->AsPropertyName();
+ name = property->key()->AsLiteral()->AsRawPropertyName();
break;
}
case KEYED_PROPERTY: {
@@ -2206,7 +2236,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
VisitForRegisterValue(super_property->home_object(),
super_property_args[1]);
builder()
- ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
.StoreAccumulatorInRegister(super_property_args[2]);
break;
}
@@ -2235,7 +2265,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
break;
}
case NAMED_PROPERTY: {
- FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ FeedbackSlot slot = property->PropertyFeedbackSlot();
builder()
->LoadNamedProperty(object, name, feedback_index(slot))
.StoreAccumulatorInRegister(old_value);
@@ -2244,7 +2274,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
case KEYED_PROPERTY: {
// Key is already in accumulator at this point due to evaluating the
// LHS above.
- FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ FeedbackSlot slot = property->PropertyFeedbackSlot();
builder()
->LoadKeyedProperty(object, feedback_index(slot))
.StoreAccumulatorInRegister(old_value);
@@ -2266,8 +2296,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
}
VisitForAccumulatorValue(expr->value());
- FeedbackVectorSlot slot =
- expr->binary_operation()->BinaryOperationFeedbackSlot();
+ FeedbackSlot slot = expr->binary_operation()->BinaryOperationFeedbackSlot();
builder()->BinaryOperation(expr->binary_op(), old_value,
feedback_index(slot));
} else {
@@ -2276,7 +2305,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
builder()->SetExpressionPosition(expr);
- FeedbackVectorSlot slot = expr->AssignmentSlot();
+ FeedbackSlot slot = expr->AssignmentSlot();
switch (assign_type) {
case VARIABLE: {
// TODO(oth): The BuildVariableAssignment() call is hard to reason about.
@@ -2392,15 +2421,15 @@ void BytecodeGenerator::VisitThrow(Throw* expr) {
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
LhsKind property_kind = Property::GetAssignType(expr);
- FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
+ FeedbackSlot slot = expr->PropertyFeedbackSlot();
builder()->SetExpressionPosition(expr);
switch (property_kind) {
case VARIABLE:
UNREACHABLE();
case NAMED_PROPERTY: {
- builder()->LoadNamedProperty(obj,
- expr->key()->AsLiteral()->AsPropertyName(),
- feedback_index(slot));
+ builder()->LoadNamedProperty(
+ obj, expr->key()->AsLiteral()->AsRawPropertyName(),
+ feedback_index(slot));
break;
}
case KEYED_PROPERTY: {
@@ -2434,7 +2463,7 @@ void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
VisitForRegisterValue(super_property->this_var(), args[0]);
VisitForRegisterValue(super_property->home_object(), args[1]);
builder()
- ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
.StoreAccumulatorInRegister(args[2])
.CallRuntime(Runtime::kLoadFromSuper, args);
@@ -2486,13 +2515,17 @@ void BytecodeGenerator::VisitCall(Call* expr) {
return VisitCallSuper(expr);
}
- Register callee = register_allocator()->NewRegister();
// Grow the args list as we visit receiver / arguments to avoid allocating all
// the registers up-front. Otherwise these registers are unavailable during
// receiver / argument visiting and we can end up with memory leaks due to
// registers keeping objects alive.
+ Register callee = register_allocator()->NewRegister();
RegisterList args = register_allocator()->NewGrowableRegisterList();
+  // TODO(petermarshall): We have a lot of call bytecodes that are very similar;
+  // see if we can reduce the number by adding a separate argument which
+ // specifies the call type (e.g., property, spread, tailcall, etc.).
+
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
switch (call_type) {
@@ -2500,7 +2533,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
case Call::KEYED_PROPERTY_CALL: {
Property* property = callee_expr->AsProperty();
VisitAndPushIntoRegisterList(property->obj(), &args);
- VisitPropertyLoadForRegister(args[0], property, callee);
+ VisitPropertyLoadForRegister(args.last_register(), property, callee);
break;
}
case Call::GLOBAL_CALL: {
@@ -2527,7 +2560,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
USE(receiver);
Variable* variable = callee_expr->AsVariableProxy()->var();
builder()
- ->LoadLiteral(variable->name())
+ ->LoadLiteral(variable->raw_name())
.StoreAccumulatorInRegister(name)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name,
result_pair);
@@ -2577,8 +2610,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
.MoveRegister(Register::function_closure(), runtime_call_args[2])
.LoadLiteral(Smi::FromInt(language_mode()))
.StoreAccumulatorInRegister(runtime_call_args[3])
- .LoadLiteral(
- Smi::FromInt(execution_context()->scope()->start_position()))
+ .LoadLiteral(Smi::FromInt(current_scope()->start_position()))
.StoreAccumulatorInRegister(runtime_call_args[4])
.LoadLiteral(Smi::FromInt(expr->position()))
.StoreAccumulatorInRegister(runtime_call_args[5]);
@@ -2591,9 +2623,16 @@ void BytecodeGenerator::VisitCall(Call* expr) {
builder()->SetExpressionPosition(expr);
- int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
- builder()->Call(callee, args, feedback_slot_index, call_type,
- expr->tail_call_mode());
+ // When a call contains a spread, a Call AST node is only created if there is
+ // exactly one spread, and it is the last argument.
+ if (expr->only_last_arg_is_spread()) {
+ DCHECK_EQ(TailCallMode::kDisallow, expr->tail_call_mode());
+ builder()->CallWithSpread(callee, args);
+ } else {
+ int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+ builder()->Call(callee, args, feedback_slot_index, call_type,
+ expr->tail_call_mode());
+ }
}
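
only_last_arg_is_spread() encodes the guarantee stated in the comment above: a plain Call node carries at most one spread, and only in final position, e.g. f(a, ...xs) but never f(...xs, b). A self-contained sketch of such a predicate (not the real AST API):

#include <cstddef>
#include <vector>

struct ToyArg {
  bool is_spread;
};

// True only for shapes like f(a, ...xs); f(...xs, b) or f(...xs, ...ys)
// would not reach this bytecode path as a plain Call node at all.
bool OnlyLastArgIsSpread(const std::vector<ToyArg>& args) {
  if (args.empty() || !args.back().is_spread) return false;
  for (size_t i = 0; i + 1 < args.size(); ++i) {
    if (args[i].is_spread) return false;
  }
  return true;
}
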
void BytecodeGenerator::VisitCallSuper(Call* expr) {
@@ -2606,30 +2645,20 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
builder()->GetSuperConstructor(constructor);
ZoneList<Expression*>* args = expr->arguments();
+ RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
+ VisitArguments(args, &args_regs);
+ // The new target is loaded into the accumulator from the
+ // {new.target} variable.
+ VisitForAccumulatorValue(super->new_target_var());
+ builder()->SetExpressionPosition(expr);
// When a super call contains a spread, a CallSuper AST node is only created
// if there is exactly one spread, and it is the last argument.
- if (!args->is_empty() && args->last()->IsSpread()) {
- RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
- Register constructor_arg =
- register_allocator()->GrowRegisterList(&args_regs);
- builder()->MoveRegister(constructor, constructor_arg);
- // Reserve argument reg for new.target in correct place for runtime call.
- // TODO(petermarshall): Remove this when changing bytecode to use the new
- // stub.
- Register new_target = register_allocator()->GrowRegisterList(&args_regs);
- VisitArguments(args, &args_regs);
- VisitForRegisterValue(super->new_target_var(), new_target);
- builder()->NewWithSpread(args_regs);
+ if (expr->only_last_arg_is_spread()) {
+ // TODO(petermarshall): Collect type on the feedback slot.
+ builder()->ConstructWithSpread(constructor, args_regs);
} else {
- RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
- VisitArguments(args, &args_regs);
- // The new target is loaded into the accumulator from the
- // {new.target} variable.
- VisitForAccumulatorValue(super->new_target_var());
-
// Call construct.
- builder()->SetExpressionPosition(expr);
// TODO(turbofan): For now we do gather feedback on super constructor
// calls, utilizing the existing machinery to inline the actual call
// target and the JSCreate for the implicit receiver allocation. This
@@ -2637,7 +2666,7 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// the job done for now. In the long run we might want to revisit this
// and come up with a better way.
int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
- builder()->New(constructor, args_regs, feedback_slot_index);
+ builder()->Construct(constructor, args_regs, feedback_slot_index);
}
}
@@ -2646,12 +2675,18 @@ void BytecodeGenerator::VisitCallNew(CallNew* expr) {
RegisterList args = register_allocator()->NewGrowableRegisterList();
VisitArguments(expr->arguments(), &args);
- builder()->SetExpressionPosition(expr);
// The accumulator holds new target which is the same as the
// constructor for CallNew.
- builder()
- ->LoadAccumulatorWithRegister(constructor)
- .New(constructor, args, feedback_index(expr->CallNewFeedbackSlot()));
+ builder()->SetExpressionPosition(expr);
+ builder()->LoadAccumulatorWithRegister(constructor);
+
+ if (expr->only_last_arg_is_spread()) {
+ // TODO(petermarshall): Collect type on the feedback slot.
+ builder()->ConstructWithSpread(constructor, args);
+ } else {
+ builder()->Construct(constructor, args,
+ feedback_index(expr->CallNewFeedbackSlot()));
+ }
}
void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
@@ -2752,11 +2787,13 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
Register global_object = register_allocator()->NewRegister();
builder()
->LoadContextSlot(execution_context()->reg(),
- Context::NATIVE_CONTEXT_INDEX, 0)
+ Context::NATIVE_CONTEXT_INDEX, 0,
+ BytecodeArrayBuilder::kMutableSlot)
.StoreAccumulatorInRegister(native_context)
- .LoadContextSlot(native_context, Context::EXTENSION_INDEX, 0)
+ .LoadContextSlot(native_context, Context::EXTENSION_INDEX, 0,
+ BytecodeArrayBuilder::kMutableSlot)
.StoreAccumulatorInRegister(global_object)
- .LoadLiteral(variable->name())
+ .LoadLiteral(variable->raw_name())
.Delete(global_object, language_mode());
break;
}
@@ -2775,7 +2812,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
case VariableLocation::LOOKUP: {
Register name_reg = register_allocator()->NewRegister();
builder()
- ->LoadLiteral(variable->name())
+ ->LoadLiteral(variable->raw_name())
.StoreAccumulatorInRegister(name_reg)
.CallRuntime(Runtime::kDeleteLookupSlot, name_reg);
break;
@@ -2802,7 +2839,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Evaluate LHS expression and get old value.
Register object, key, old_value;
RegisterList super_property_args;
- Handle<String> name;
+ const AstRawString* name;
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
@@ -2812,14 +2849,14 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case NAMED_PROPERTY: {
- FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ FeedbackSlot slot = property->PropertyFeedbackSlot();
object = VisitForRegisterValue(property->obj());
- name = property->key()->AsLiteral()->AsPropertyName();
+ name = property->key()->AsLiteral()->AsRawPropertyName();
builder()->LoadNamedProperty(object, name, feedback_index(slot));
break;
}
case KEYED_PROPERTY: {
- FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ FeedbackSlot slot = property->PropertyFeedbackSlot();
object = VisitForRegisterValue(property->obj());
// Use visit for accumulator here since we need the key in the accumulator
// for the LoadKeyedProperty.
@@ -2837,7 +2874,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForRegisterValue(super_property->this_var(), load_super_args[0]);
VisitForRegisterValue(super_property->home_object(), load_super_args[1]);
builder()
- ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
.StoreAccumulatorInRegister(load_super_args[2])
.CallRuntime(Runtime::kLoadFromSuper, load_super_args);
break;
@@ -2865,12 +2902,12 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Perform +1/-1 operation.
- FeedbackVectorSlot slot = expr->CountBinaryOpFeedbackSlot();
+ FeedbackSlot slot = expr->CountBinaryOpFeedbackSlot();
builder()->CountOperation(expr->binary_op(), feedback_index(slot));
// Store the value.
builder()->SetExpressionPosition(expr);
- FeedbackVectorSlot feedback_slot = expr->CountSlot();
+ FeedbackSlot feedback_slot = expr->CountSlot();
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
@@ -2929,7 +2966,7 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
builder()->SetExpressionPosition(expr);
- FeedbackVectorSlot slot = expr->CompareOperationFeedbackSlot();
+ FeedbackSlot slot = expr->CompareOperationFeedbackSlot();
builder()->CompareOperation(expr->op(), lhs, feedback_index(slot));
}
@@ -2938,7 +2975,7 @@ void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
// +x and -x by the parser.
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
- FeedbackVectorSlot slot = expr->BinaryOperationFeedbackSlot();
+ FeedbackSlot slot = expr->BinaryOperationFeedbackSlot();
builder()->SetExpressionPosition(expr);
builder()->BinaryOperation(expr->op(), lhs, feedback_index(slot));
}
@@ -2950,8 +2987,8 @@ void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
}
void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
- FeedbackVectorSlot load_slot = expr->IteratorPropertyFeedbackSlot();
- FeedbackVectorSlot call_slot = expr->IteratorCallFeedbackSlot();
+ FeedbackSlot load_slot = expr->IteratorPropertyFeedbackSlot();
+ FeedbackSlot call_slot = expr->IteratorCallFeedbackSlot();
RegisterList args = register_allocator()->NewRegisterList(1);
Register method = register_allocator()->NewRegister();
@@ -2959,21 +2996,64 @@ void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
VisitForAccumulatorValue(expr->iterable());
- // Let method be GetMethod(obj, @@iterator).
- builder()
- ->StoreAccumulatorInRegister(obj)
- .LoadNamedProperty(obj, iterator_symbol(), feedback_index(load_slot))
- .StoreAccumulatorInRegister(method);
+ if (expr->hint() == IteratorType::kAsync) {
+ FeedbackSlot async_load_slot = expr->AsyncIteratorPropertyFeedbackSlot();
+ FeedbackSlot async_call_slot = expr->AsyncIteratorCallFeedbackSlot();
+
+ // Set method to GetMethod(obj, @@asyncIterator)
+ builder()->StoreAccumulatorInRegister(obj).LoadAsyncIteratorProperty(
+ obj, feedback_index(async_load_slot));
+
+ BytecodeLabel async_iterator_undefined, async_iterator_null, done;
+ // TODO(ignition): Add a single opcode for JumpIfNullOrUndefined
+ builder()->JumpIfUndefined(&async_iterator_undefined);
+ builder()->JumpIfNull(&async_iterator_null);
- // Let iterator be Call(method, obj).
- builder()->Call(method, args, feedback_index(call_slot),
- Call::NAMED_PROPERTY_CALL);
+ // Let iterator be Call(method, obj)
+ builder()->StoreAccumulatorInRegister(method).Call(
+ method, args, feedback_index(async_call_slot),
+ Call::NAMED_PROPERTY_CALL);
- // If Type(iterator) is not Object, throw a TypeError exception.
- BytecodeLabel no_type_error;
- builder()->JumpIfJSReceiver(&no_type_error);
- builder()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid);
- builder()->Bind(&no_type_error);
+ // If Type(iterator) is not Object, throw a TypeError exception.
+ builder()->JumpIfJSReceiver(&done);
+ builder()->CallRuntime(Runtime::kThrowSymbolAsyncIteratorInvalid);
+
+ builder()->Bind(&async_iterator_undefined);
+ builder()->Bind(&async_iterator_null);
+ // If method is undefined,
+ // Let syncMethod be GetMethod(obj, @@iterator)
+ builder()
+ ->LoadIteratorProperty(obj, feedback_index(load_slot))
+ .StoreAccumulatorInRegister(method);
+
+ // Let syncIterator be Call(syncMethod, obj)
+ builder()->Call(method, args, feedback_index(call_slot),
+ Call::NAMED_PROPERTY_CALL);
+
+ // Return CreateAsyncFromSyncIterator(syncIterator)
+    // Alias the `method` register, as it's no longer used.
+ Register sync_iter = method;
+ builder()->StoreAccumulatorInRegister(sync_iter).CallRuntime(
+ Runtime::kInlineCreateAsyncFromSyncIterator, sync_iter);
+
+ builder()->Bind(&done);
+ } else {
+ // Let method be GetMethod(obj, @@iterator).
+ builder()
+ ->StoreAccumulatorInRegister(obj)
+ .LoadIteratorProperty(obj, feedback_index(load_slot))
+ .StoreAccumulatorInRegister(method);
+
+ // Let iterator be Call(method, obj).
+ builder()->Call(method, args, feedback_index(call_slot),
+ Call::NAMED_PROPERTY_CALL);
+
+ // If Type(iterator) is not Object, throw a TypeError exception.
+ BytecodeLabel no_type_error;
+ builder()->JumpIfJSReceiver(&no_type_error);
+ builder()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid);
+ builder()->Bind(&no_type_error);
+ }
}
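
The async branch above follows the spec's GetIterator(obj, async) shape: prefer obj[Symbol.asyncIterator]; when that method is undefined or null, fetch obj[Symbol.iterator] and wrap its result via CreateAsyncFromSyncIterator so for-await-of still works over plain iterables. The selection logic reduced to a sketch with stand-in types:

enum class AsyncIteratorSource {
  kNativeAsync,  // obj[Symbol.asyncIterator] was neither undefined nor null.
  kWrappedSync   // Fall back to obj[Symbol.iterator] plus the async wrapper.
};

// The only decision point above is whether @@asyncIterator resolved to a
// usable method; both undefined and null take the sync-wrapping path.
inline AsyncIteratorSource SelectSource(bool async_method_present) {
  return async_method_present ? AsyncIteratorSource::kNativeAsync
                              : AsyncIteratorSource::kWrappedSync;
}
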
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
@@ -3071,7 +3151,7 @@ void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
void BytecodeGenerator::BuildNewLocalActivationContext() {
ValueResultScope value_execution_result(this);
- Scope* scope = this->scope();
+ Scope* scope = closure_scope();
// Create the appropriate context.
if (scope->is_script_scope()) {
@@ -3079,7 +3159,7 @@ void BytecodeGenerator::BuildNewLocalActivationContext() {
builder()
->LoadAccumulatorWithRegister(Register::function_closure())
.StoreAccumulatorInRegister(args[0])
- .LoadLiteral(scope->scope_info())
+ .LoadLiteral(scope)
.StoreAccumulatorInRegister(args[1])
.CallRuntime(Runtime::kNewScriptContext, args);
} else if (scope->is_module_scope()) {
@@ -3093,7 +3173,7 @@ void BytecodeGenerator::BuildNewLocalActivationContext() {
->MoveRegister(builder()->Parameter(1), args[0])
.LoadAccumulatorWithRegister(Register::function_closure())
.StoreAccumulatorInRegister(args[1])
- .LoadLiteral(scope->scope_info())
+ .LoadLiteral(scope)
.StoreAccumulatorInRegister(args[2])
.CallRuntime(Runtime::kPushModuleContext, args);
} else {
@@ -3123,7 +3203,7 @@ void BytecodeGenerator::BuildNewLocalActivationContext() {
}
void BytecodeGenerator::BuildLocalActivationContextInitialization() {
- DeclarationScope* scope = this->scope();
+ DeclarationScope* scope = closure_scope();
if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
Variable* variable = scope->receiver();
@@ -3155,7 +3235,7 @@ void BytecodeGenerator::BuildNewLocalBlockContext(Scope* scope) {
DCHECK(scope->is_block_scope());
VisitFunctionClosureForContext();
- builder()->CreateBlockContext(scope->scope_info());
+ builder()->CreateBlockContext(scope);
}
void BytecodeGenerator::BuildNewLocalWithContext(Scope* scope) {
@@ -3165,7 +3245,7 @@ void BytecodeGenerator::BuildNewLocalWithContext(Scope* scope) {
builder()->ConvertAccumulatorToObject(extension_object);
VisitFunctionClosureForContext();
- builder()->CreateWithContext(extension_object, scope->scope_info());
+ builder()->CreateWithContext(extension_object, scope);
}
void BytecodeGenerator::BuildNewLocalCatchContext(Variable* variable,
@@ -3176,8 +3256,7 @@ void BytecodeGenerator::BuildNewLocalCatchContext(Variable* variable,
Register exception = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(exception);
VisitFunctionClosureForContext();
- builder()->CreateCatchContext(exception, variable->name(),
- scope->scope_info());
+ builder()->CreateCatchContext(exception, variable->raw_name(), scope);
}
void BytecodeGenerator::VisitObjectLiteralAccessor(
@@ -3195,11 +3274,10 @@ void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
int slot_number) {
Expression* expr = property->value();
if (FunctionLiteral::NeedsHomeObject(expr)) {
- FeedbackVectorSlot slot = property->GetSlot(slot_number);
+ FeedbackSlot slot = property->GetSlot(slot_number);
builder()
->LoadAccumulatorWithRegister(home_object)
- .StoreNamedProperty(value, home_object_symbol(), feedback_index(slot),
- language_mode());
+ .StoreHomeObjectProperty(value, feedback_index(slot), language_mode());
}
}
@@ -3215,8 +3293,7 @@ void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
? CreateArgumentsType::kUnmappedArguments
: CreateArgumentsType::kMappedArguments;
builder()->CreateArguments(type);
- BuildVariableAssignment(variable, Token::ASSIGN,
- FeedbackVectorSlot::Invalid(),
+ BuildVariableAssignment(variable, Token::ASSIGN, FeedbackSlot::Invalid(),
HoleCheckMode::kElided);
}
@@ -3227,7 +3304,7 @@ void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
// variable.
builder()->CreateArguments(CreateArgumentsType::kRestParameter);
DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
- BuildVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid(),
+ BuildVariableAssignment(rest, Token::ASSIGN, FeedbackSlot::Invalid(),
HoleCheckMode::kElided);
}
@@ -3236,7 +3313,7 @@ void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
// Store the closure we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::function_closure());
- BuildVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid(),
+ BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
HoleCheckMode::kElided);
}
@@ -3245,7 +3322,7 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
// Store the new target we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::new_target());
- BuildVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid(),
+ BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
HoleCheckMode::kElided);
// TODO(mstarzinger): The <new.target> register is not set by the deoptimizer
@@ -3258,26 +3335,27 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
void BytecodeGenerator::VisitFunctionClosureForContext() {
ValueResultScope value_execution_result(this);
- DeclarationScope* closure_scope =
- execution_context()->scope()->GetClosureScope();
- if (closure_scope->is_script_scope()) {
+ if (closure_scope()->is_script_scope()) {
// Contexts nested in the native context have a canonical empty function as
// their closure, not the anonymous closure containing the global code.
Register native_context = register_allocator()->NewRegister();
builder()
->LoadContextSlot(execution_context()->reg(),
- Context::NATIVE_CONTEXT_INDEX, 0)
+ Context::NATIVE_CONTEXT_INDEX, 0,
+ BytecodeArrayBuilder::kMutableSlot)
.StoreAccumulatorInRegister(native_context)
- .LoadContextSlot(native_context, Context::CLOSURE_INDEX, 0);
- } else if (closure_scope->is_eval_scope()) {
+ .LoadContextSlot(native_context, Context::CLOSURE_INDEX, 0,
+ BytecodeArrayBuilder::kMutableSlot);
+ } else if (closure_scope()->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
builder()->LoadContextSlot(execution_context()->reg(),
- Context::CLOSURE_INDEX, 0);
+ Context::CLOSURE_INDEX, 0,
+ BytecodeArrayBuilder::kMutableSlot);
} else {
- DCHECK(closure_scope->is_function_scope() ||
- closure_scope->is_module_scope());
+ DCHECK(closure_scope()->is_function_scope() ||
+ closure_scope()->is_module_scope());
builder()->LoadAccumulatorWithRegister(Register::function_closure());
}
}
@@ -3373,16 +3451,17 @@ void BytecodeGenerator::VisitForTest(Expression* expr,
}
void BytecodeGenerator::VisitInScope(Statement* stmt, Scope* scope) {
- ContextScope context_scope(this, scope);
DCHECK(scope->declarations()->is_empty());
+ CurrentScope current_scope(this, scope);
+ ContextScope context_scope(this, scope);
Visit(stmt);
}
LanguageMode BytecodeGenerator::language_mode() const {
- return execution_context()->scope()->language_mode();
+ return current_scope()->language_mode();
}
-int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
+int BytecodeGenerator::feedback_index(FeedbackSlot slot) const {
return FeedbackVector::GetIndex(slot);
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index dcdba99e2f..755648ebfc 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -18,6 +18,7 @@ class CompilationInfo;
namespace interpreter {
+class GlobalDeclarationsBuilder;
class LoopBuilder;
class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
@@ -43,6 +44,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
class ControlScopeForTopLevel;
class ControlScopeForTryCatch;
class ControlScopeForTryFinally;
+ class CurrentScope;
class ExpressionResultScope;
class EffectResultScope;
class GlobalDeclarationsBuilder;
@@ -94,23 +96,22 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitPropertyLoadForRegister(Register obj, Property* expr,
Register destination);
- void BuildVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+ void BuildVariableLoad(Variable* variable, FeedbackSlot slot,
HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
void BuildVariableLoadForAccumulatorValue(
- Variable* variable, FeedbackVectorSlot slot,
- HoleCheckMode hole_check_mode,
+ Variable* variable, FeedbackSlot slot, HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
void BuildVariableAssignment(Variable* variable, Token::Value op,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
HoleCheckMode hole_check_mode);
void BuildReturn();
void BuildAsyncReturn();
void BuildReThrow();
void BuildAbort(BailoutReason bailout_reason);
- void BuildThrowIfHole(Handle<String> name);
- void BuildThrowReferenceError(Handle<String> name);
+ void BuildThrowIfHole(const AstRawString* name);
+ void BuildThrowReferenceError(const AstRawString* name);
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
// Build jump to targets[value], where
@@ -141,7 +142,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitObjectLiteralAccessor(Register home_object,
ObjectLiteralProperty* property,
Register value_out);
- void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
+ void VisitForInAssignment(Expression* expr, FeedbackSlot slot);
void VisitModuleNamespaceImports();
// Visit the header/body of a loop iteration.
@@ -172,9 +173,12 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
inline BytecodeArrayBuilder* builder() const { return builder_; }
inline Zone* zone() const { return zone_; }
- inline DeclarationScope* scope() const { return scope_; }
+ inline DeclarationScope* closure_scope() const { return closure_scope_; }
inline CompilationInfo* info() const { return info_; }
+ inline Scope* current_scope() const { return current_scope_; }
+ inline void set_current_scope(Scope* scope) { current_scope_ = scope; }
+
inline ControlScope* execution_control() const { return execution_control_; }
inline void set_execution_control(ControlScope* scope) {
execution_control_ = scope;
@@ -191,20 +195,21 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
return builder()->register_allocator();
}
- GlobalDeclarationsBuilder* globals_builder() { return globals_builder_; }
+ GlobalDeclarationsBuilder* globals_builder() {
+ DCHECK_NOT_NULL(globals_builder_);
+ return globals_builder_;
+ }
inline LanguageMode language_mode() const;
- int feedback_index(FeedbackVectorSlot slot) const;
+ int feedback_index(FeedbackSlot slot) const;
- Handle<Name> home_object_symbol() const { return home_object_symbol_; }
- Handle<Name> iterator_symbol() const { return iterator_symbol_; }
- Handle<Name> prototype_string() const { return prototype_string_; }
- Handle<FixedArray> empty_fixed_array() const { return empty_fixed_array_; }
+ const AstRawString* prototype_string() const { return prototype_string_; }
const AstRawString* undefined_string() const { return undefined_string_; }
Zone* zone_;
BytecodeArrayBuilder* builder_;
CompilationInfo* info_;
- DeclarationScope* scope_;
+ DeclarationScope* closure_scope_;
+ Scope* current_scope_;
GlobalDeclarationsBuilder* globals_builder_;
ZoneVector<GlobalDeclarationsBuilder*> global_declarations_;
@@ -222,10 +227,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Register generator_state_;
int loop_depth_;
- Handle<Name> home_object_symbol_;
- Handle<Name> iterator_symbol_;
- Handle<Name> prototype_string_;
- Handle<FixedArray> empty_fixed_array_;
+ const AstRawString* prototype_string_;
const AstRawString* undefined_string_;
};
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 15c4e98a02..f7fb7edde1 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -166,6 +166,19 @@ bool Bytecodes::IsRegisterOperandType(OperandType operand_type) {
return false;
}
+bool Bytecodes::MakesCallAlongCriticalPath(Bytecode bytecode) {
+ if (IsCallOrConstruct(bytecode) || IsCallRuntime(bytecode)) return true;
+ switch (bytecode) {
+ case Bytecode::kCreateWithContext:
+ case Bytecode::kCreateBlockContext:
+ case Bytecode::kCreateCatchContext:
+ case Bytecode::kCreateRegExpLiteral:
+ return true;
+ default:
+ return false;
+ }
+}
+
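
Per the header comment added below, MakesCallAlongCriticalPath() reports whether a handler's hot path performs a call. The call sites are outside this diff; one plausible (assumed) consumer shape:

// Illustrative only: the real consumers live elsewhere in the interpreter.
void PrepareDispatch(Bytecode bytecode) {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode)) {
    // A handler that calls out on its hot path needs caller-visible frame
    // state (e.g. the current bytecode offset) persisted before dispatch.
  }
}
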
// static
bool Bytecodes::IsRegisterInputOperandType(OperandType operand_type) {
switch (operand_type) {
@@ -227,7 +240,8 @@ bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
case Bytecode::kTypeOf:
case Bytecode::kCall:
case Bytecode::kCallProperty:
- case Bytecode::kNew:
+ case Bytecode::kConstruct:
+ case Bytecode::kConstructWithSpread:
return true;
default:
return false;
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index f09af85be4..f6085269ff 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -51,7 +51,10 @@ namespace interpreter {
V(PopContext, AccumulatorUse::kNone, OperandType::kReg) \
V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kUImm) \
+ V(LdaImmutableContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kUImm) \
V(LdaCurrentContextSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(LdaImmutableCurrentContextSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kIdx, OperandType::kUImm) \
V(StaCurrentContextSlot, AccumulatorUse::kRead, OperandType::kIdx) \
@@ -94,6 +97,8 @@ namespace interpreter {
OperandType::kIdx, OperandType::kIdx) \
V(StaNamedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
+ V(StaNamedOwnProperty, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kIdx) \
V(StaKeyedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kReg, OperandType::kIdx) \
V(StaKeyedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg, \
@@ -151,6 +156,8 @@ namespace interpreter {
OperandType::kRegCount, OperandType::kIdx) \
V(CallProperty, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
+ V(CallWithSpread, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kRegList, OperandType::kRegCount) \
V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
@@ -164,11 +171,11 @@ namespace interpreter {
V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kIntrinsicId, \
OperandType::kRegList, OperandType::kRegCount) \
\
- /* New operators */ \
- V(New, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kRegList, \
- OperandType::kRegCount, OperandType::kIdx) \
- V(NewWithSpread, AccumulatorUse::kWrite, OperandType::kRegList, \
- OperandType::kRegCount) \
+ /* Construct operators */ \
+ V(Construct, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
+ V(ConstructWithSpread, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kRegList, OperandType::kRegCount) \
\
/* Test Operators */ \
V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
@@ -226,9 +233,9 @@ namespace interpreter {
\
/* Control Flow -- carefully ordered for efficient checks */ \
/* - [Unconditional jumps] */ \
- V(JumpLoop, AccumulatorUse::kNone, OperandType::kImm, OperandType::kImm) \
+ V(JumpLoop, AccumulatorUse::kNone, OperandType::kUImm, OperandType::kImm) \
/* - [Forward jumps] */ \
- V(Jump, AccumulatorUse::kNone, OperandType::kImm) \
+ V(Jump, AccumulatorUse::kNone, OperandType::kUImm) \
/* - [Start constant jumps] */ \
V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx) \
/* - [Conditional jumps] */ \
@@ -244,15 +251,15 @@ namespace interpreter {
V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
/* - [End constant jumps] */ \
/* - [Conditional immediate jumps] */ \
- V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kUImm) \
+ V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kUImm) \
/* - [End ToBoolean jumps] */ \
- V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfJSReceiver, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kUImm) \
+ V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kUImm) \
+ V(JumpIfNull, AccumulatorUse::kRead, OperandType::kUImm) \
+ V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kUImm) \
+ V(JumpIfJSReceiver, AccumulatorUse::kRead, OperandType::kUImm) \
+ V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kUImm) \
\
/* Complex flow control For..in */ \
V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg, \
@@ -508,7 +515,9 @@ class V8_EXPORT_PRIVATE Bytecodes final {
bytecode == Bytecode::kLdaTheHole ||
bytecode == Bytecode::kLdaConstant ||
bytecode == Bytecode::kLdaContextSlot ||
- bytecode == Bytecode::kLdaCurrentContextSlot;
+ bytecode == Bytecode::kLdaCurrentContextSlot ||
+ bytecode == Bytecode::kLdaImmutableContextSlot ||
+ bytecode == Bytecode::kLdaImmutableCurrentContextSlot;
}
// Return true if |bytecode| is a register load without effects,
@@ -610,9 +619,14 @@ class V8_EXPORT_PRIVATE Bytecodes final {
}
// Returns true if the bytecode is a call or a constructor call.
- static constexpr bool IsCallOrNew(Bytecode bytecode) {
+ static constexpr bool IsCallOrConstruct(Bytecode bytecode) {
return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty ||
- bytecode == Bytecode::kTailCall || bytecode == Bytecode::kNew;
+ bytecode == Bytecode::kTailCall ||
+ bytecode == Bytecode::kConstruct ||
+ bytecode == Bytecode::kCallWithSpread ||
+ bytecode == Bytecode::kConstructWithSpread ||
+ bytecode == Bytecode::kInvokeIntrinsic ||
+ bytecode == Bytecode::kCallJSRuntime;
}
// Returns true if the bytecode is a call to the runtime.
@@ -717,6 +731,10 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns the equivalent jump bytecode without the accumulator coercion.
static Bytecode GetJumpWithoutToBoolean(Bytecode bytecode);
+ // Returns true if there is a call in the most-frequently executed path
+ // through the bytecode's handler.
+ static bool MakesCallAlongCriticalPath(Bytecode bytecode);
+
// Returns true if the bytecode is a debug break.
static bool IsDebugBreak(Bytecode bytecode);
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 6fd141e911..74d887a61a 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -7,6 +7,10 @@
#include <functional>
#include <set>
+#include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/base/functional.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -34,39 +38,45 @@ void ConstantArrayBuilder::ConstantArraySlice::Unreserve() {
}
size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
- Handle<Object> object) {
+ ConstantArrayBuilder::Entry entry) {
DCHECK_GT(available(), 0u);
size_t index = constants_.size();
DCHECK_LT(index, capacity());
- constants_.push_back(object);
+ constants_.push_back(entry);
return index + start_index();
}
-Handle<Object> ConstantArrayBuilder::ConstantArraySlice::At(
- size_t index) const {
+ConstantArrayBuilder::Entry& ConstantArrayBuilder::ConstantArraySlice::At(
+ size_t index) {
DCHECK_GE(index, start_index());
DCHECK_LT(index, start_index() + size());
return constants_[index - start_index()];
}
-void ConstantArrayBuilder::ConstantArraySlice::InsertAt(size_t index,
- Handle<Object> object) {
+const ConstantArrayBuilder::Entry& ConstantArrayBuilder::ConstantArraySlice::At(
+ size_t index) const {
DCHECK_GE(index, start_index());
DCHECK_LT(index, start_index() + size());
- constants_[index - start_index()] = object;
+ return constants_[index - start_index()];
}
#if DEBUG
-void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique()
- const {
+void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
+ Isolate* isolate) const {
std::set<Object*> elements;
- for (auto constant : constants_) {
- if (elements.find(*constant) != elements.end()) {
+ for (const Entry& entry : constants_) {
+ Handle<Object> handle = entry.ToHandle(isolate);
+ if (elements.find(*handle) != elements.end()) {
std::ostringstream os;
- os << "Duplicate constant found: " << Brief(*constant);
+ os << "Duplicate constant found: " << Brief(*handle) << std::endl;
+ // Print all the entries in the slice to help debug duplicates.
+ size_t i = start_index();
+ for (const Entry& prev_entry : constants_) {
+ os << i++ << ": " << Brief(*prev_entry.ToHandle(isolate)) << std::endl;
+ }
FATAL(os.str().c_str());
}
- elements.insert(*constant);
+ elements.insert(*handle);
}
}
#endif
@@ -77,14 +87,15 @@ STATIC_CONST_MEMBER_DEFINITION const size_t
STATIC_CONST_MEMBER_DEFINITION const size_t
ConstantArrayBuilder::k32BitCapacity;
-ConstantArrayBuilder::ConstantArrayBuilder(Zone* zone,
- Handle<Object> the_hole_value)
- : constants_map_(16, base::KeyEqualityMatcher<Address>(),
+ConstantArrayBuilder::ConstantArrayBuilder(Zone* zone)
+ : constants_map_(16, base::KeyEqualityMatcher<intptr_t>(),
ZoneAllocationPolicy(zone)),
smi_map_(zone),
smi_pairs_(zone),
- zone_(zone),
- the_hole_value_(the_hole_value) {
+#define INIT_SINGLETON_ENTRY_FIELD(NAME, LOWER_NAME) LOWER_NAME##_(-1),
+ SINGLETON_CONSTANT_ENTRY_TYPES(INIT_SINGLETON_ENTRY_FIELD)
+#undef INIT_SINGLETON_ENTRY_FIELD
+ zone_(zone) {
idx_slice_[0] =
new (zone) ConstantArraySlice(zone, 0, k8BitCapacity, OperandSize::kByte);
idx_slice_[1] = new (zone) ConstantArraySlice(
@@ -115,38 +126,35 @@ ConstantArrayBuilder::ConstantArraySlice* ConstantArrayBuilder::IndexToSlice(
return nullptr;
}
-Handle<Object> ConstantArrayBuilder::At(size_t index) const {
+MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
+ Isolate* isolate) const {
const ConstantArraySlice* slice = IndexToSlice(index);
+ DCHECK_LT(index, slice->capacity());
if (index < slice->start_index() + slice->size()) {
- return slice->At(index);
- } else {
- DCHECK_LT(index, slice->capacity());
- return the_hole_value();
+ const Entry& entry = slice->At(index);
+ if (!entry.IsDeferred()) return entry.ToHandle(isolate);
}
+ return MaybeHandle<Object>();
}
Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
- // First insert reserved SMI values.
- for (auto reserved_smi : smi_pairs_) {
- InsertAllocatedEntry(reserved_smi.second,
- handle(reserved_smi.first, isolate));
- }
-
Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArrayWithHoles(
static_cast<int>(size()), PretenureFlag::TENURED);
int array_index = 0;
for (const ConstantArraySlice* slice : idx_slice_) {
+ DCHECK_EQ(slice->reserved(), 0);
DCHECK(array_index == 0 ||
base::bits::IsPowerOfTwo32(static_cast<uint32_t>(array_index)));
#if DEBUG
// Different slices might contain the same element due to reservations, but
// all elements within a slice should be unique. If this DCHECK fails, then
// the AST nodes are not being internalized within a CanonicalHandleScope.
- slice->CheckAllElementsAreUnique();
+ slice->CheckAllElementsAreUnique(isolate);
#endif
// Copy objects from slice into array.
for (size_t i = 0; i < slice->size(); ++i) {
- fixed_array->set(array_index++, *slice->At(slice->start_index() + i));
+ fixed_array->set(array_index++,
+ *slice->At(slice->start_index() + i).ToHandle(isolate));
}
// Leave holes where reservations led to unused slots.
size_t padding = slice->capacity() - slice->size();
@@ -159,19 +167,62 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
return fixed_array;
}
-size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
+size_t ConstantArrayBuilder::Insert(Smi* smi) {
+ auto entry = smi_map_.find(smi);
+ if (entry == smi_map_.end()) {
+ return AllocateReservedEntry(smi);
+ }
+ return entry->second;
+}
+
+size_t ConstantArrayBuilder::Insert(const AstRawString* raw_string) {
return constants_map_
- .LookupOrInsert(object.address(), ObjectHash(object.address()),
- [&]() { return AllocateIndex(object); },
+ .LookupOrInsert(reinterpret_cast<intptr_t>(raw_string),
+ raw_string->hash(),
+ [&]() { return AllocateIndex(Entry(raw_string)); },
ZoneAllocationPolicy(zone_))
->value;
}
+size_t ConstantArrayBuilder::Insert(const AstValue* heap_number) {
+ // This method only accepts heap numbers. Other types of ast value should
+ // either be passed through as raw values (in the case of strings), use the
+ // singleton Insert methods (in the case of symbols), or skip the constant
+ // pool entirely and use bytecodes with immediate values (Smis, booleans,
+ // undefined, etc.).
+ DCHECK(heap_number->IsHeapNumber());
+ return constants_map_
+ .LookupOrInsert(reinterpret_cast<intptr_t>(heap_number),
+ static_cast<uint32_t>(base::hash_value(heap_number)),
+ [&]() { return AllocateIndex(Entry(heap_number)); },
+ ZoneAllocationPolicy(zone_))
+ ->value;
+}
+
+size_t ConstantArrayBuilder::Insert(const Scope* scope) {
+ return constants_map_
+ .LookupOrInsert(reinterpret_cast<intptr_t>(scope),
+ static_cast<uint32_t>(base::hash_value(scope)),
+ [&]() { return AllocateIndex(Entry(scope)); },
+ ZoneAllocationPolicy(zone_))
+ ->value;
+}
+
+#define INSERT_ENTRY(NAME, LOWER_NAME) \
+ size_t ConstantArrayBuilder::Insert##NAME() { \
+ if (LOWER_NAME##_ < 0) { \
+ LOWER_NAME##_ = AllocateIndex(Entry::NAME()); \
+ } \
+ return LOWER_NAME##_; \
+ }
+SINGLETON_CONSTANT_ENTRY_TYPES(INSERT_ENTRY)
+#undef INSERT_ENTRY
+
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndex(
- Handle<Object> object) {
+ ConstantArrayBuilder::Entry entry) {
for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
if (idx_slice_[i]->available() > 0) {
- return static_cast<index_t>(idx_slice_[i]->Allocate(object));
+ return static_cast<index_t>(idx_slice_[i]->Allocate(entry));
}
}
UNREACHABLE();
@@ -199,15 +250,13 @@ ConstantArrayBuilder::OperandSizeToSlice(OperandSize operand_size) const {
return slice;
}
-size_t ConstantArrayBuilder::AllocateEntry() {
- return AllocateIndex(the_hole_value());
+size_t ConstantArrayBuilder::InsertDeferred() {
+ return AllocateIndex(Entry::Deferred());
}
-void ConstantArrayBuilder::InsertAllocatedEntry(size_t index,
- Handle<Object> object) {
- DCHECK_EQ(the_hole_value().address(), At(index).address());
+void ConstantArrayBuilder::SetDeferredAt(size_t index, Handle<Object> object) {
ConstantArraySlice* slice = IndexToSlice(index);
- slice->InsertAt(index, object);
+ return slice->At(index).SetDeferred(object);
}
OperandSize ConstantArrayBuilder::CreateReservedEntry() {
@@ -223,9 +272,8 @@ OperandSize ConstantArrayBuilder::CreateReservedEntry() {
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateReservedEntry(
Smi* value) {
- index_t index = static_cast<index_t>(AllocateEntry());
+ index_t index = static_cast<index_t>(AllocateIndex(Entry(value)));
smi_map_[value] = index;
- smi_pairs_.push_back(std::make_pair(value, index));
return index;
}
@@ -254,6 +302,33 @@ void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
OperandSizeToSlice(operand_size)->Unreserve();
}
+Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
+ switch (tag_) {
+ case Tag::kDeferred:
+ // We shouldn't have any deferred entries by now.
+ UNREACHABLE();
+ return Handle<Object>::null();
+ case Tag::kHandle:
+ return handle_;
+ case Tag::kSmi:
+ return handle(smi_, isolate);
+ case Tag::kRawString:
+ return raw_string_->string();
+ case Tag::kHeapNumber:
+ DCHECK(heap_number_->IsHeapNumber());
+ return heap_number_->value();
+ case Tag::kScope:
+ return scope_->scope_info();
+#define ENTRY_LOOKUP(Name, name) \
+ case Tag::k##Name: \
+ return isolate->factory()->name();
+ SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_LOOKUP);
+#undef ENTRY_LOOKUP
+ }
+ UNREACHABLE();
+ return Handle<Object>::null();
+}
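
Entry is a hand-rolled tagged union: several small payloads share storage, and the tag selects the materialization path that ToHandle switches over. A minimal standalone sketch of the pattern (plain C++, invented names, no V8 types):

    #include <cassert>
    #include <string>

    class Entry {
     private:
      enum class Tag : unsigned char { kDeferred, kSmi, kNumber };

     public:
      static Entry Deferred() { return Entry(Tag::kDeferred); }
      explicit Entry(int smi) : smi_(smi), tag_(Tag::kSmi) {}

      bool IsDeferred() const { return tag_ == Tag::kDeferred; }
      void SetDeferred(double value) {
        assert(tag_ == Tag::kDeferred);  // may only fill a deferred slot once
        tag_ = Tag::kNumber;
        number_ = value;
      }

      std::string ToString() const {  // stand-in for ToHandle
        switch (tag_) {
          case Tag::kSmi:      return std::to_string(smi_);
          case Tag::kNumber:   return std::to_string(number_);
          case Tag::kDeferred: break;
        }
        assert(false);  // no deferred entries should survive to this point
        return "";
      }

     private:
      explicit Entry(Tag tag) : tag_(tag) {}
      union {
        int smi_;
        double number_;
      };
      Tag tag_;
    };
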
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index c99c8e7c59..86e7c0818b 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -14,9 +14,18 @@ namespace v8 {
namespace internal {
class Isolate;
+class AstRawString;
+class AstValue;
namespace interpreter {
+// Constant array entries that represent singletons.
+#define SINGLETON_CONSTANT_ENTRY_TYPES(V) \
+ V(IteratorSymbol, iterator_symbol) \
+ V(AsyncIteratorSymbol, async_iterator_symbol) \
+ V(HomeObjectSymbol, home_object_symbol) \
+ V(EmptyFixedArray, empty_fixed_array)
+
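SINGLETON_CONSTANT_ENTRY_TYPES is an X-macro: the single list above is expanded several times below to stamp out Insert methods, cache fields, and enum tags that must stay in sync. A minimal standalone sketch of the technique (plain C++, invented names, not V8 code):

    #include <cstdio>

    #define SINGLETON_TYPES(V)           \
      V(IteratorSymbol, iterator_symbol) \
      V(EmptyFixedArray, empty_fixed_array)

    class Builder {
     public:
    #define DECLARE_INSERT(NAME, LOWER_NAME)                  \
      int Insert##NAME() {                                    \
        if (LOWER_NAME##_ < 0) LOWER_NAME##_ = next_index_++; \
        return LOWER_NAME##_;                                 \
      }
      SINGLETON_TYPES(DECLARE_INSERT)
    #undef DECLARE_INSERT

     private:
      int next_index_ = 0;
    #define DECLARE_FIELD(NAME, LOWER_NAME) int LOWER_NAME##_ = -1;
      SINGLETON_TYPES(DECLARE_FIELD)
    #undef DECLARE_FIELD
    };

    int main() {
      Builder b;
      // A singleton is allocated once; repeated inserts reuse the slot.
      std::printf("%d %d\n", b.InsertIteratorSymbol(),
                  b.InsertIteratorSymbol());
    }
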
// A helper class for constructing constant arrays for the
// interpreter. Each instance of this class is intended to be used to
// generate exactly one FixedArray of constants via the ToFixedArray
@@ -33,28 +42,36 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
static const size_t k32BitCapacity =
kMaxUInt32 - k16BitCapacity - k8BitCapacity + 1;
- ConstantArrayBuilder(Zone* zone, Handle<Object> the_hole_value);
+ ConstantArrayBuilder(Zone* zone);
- // Generate a fixed array of constants based on inserted objects.
+ // Generate a fixed array of constant handles based on inserted objects.
Handle<FixedArray> ToFixedArray(Isolate* isolate);
- // Returns the object in the constant pool array that at index
- // |index|.
- Handle<Object> At(size_t index) const;
+ // Returns the object, as a handle in |isolate|, that is in the constant pool
+ // array at index |index|. Returns null if there is no handle at this index.
+ // Only expected to be used in tests.
+ MaybeHandle<Object> At(size_t index, Isolate* isolate) const;
// Returns the number of elements in the array.
size_t size() const;
- // Insert an object into the constants array if it is not already
- // present. Returns the array index associated with the object.
- size_t Insert(Handle<Object> object);
+ // Insert an object into the constants array if it is not already present.
+ // Returns the array index associated with the object.
+ size_t Insert(Smi* smi);
+ size_t Insert(const AstRawString* raw_string);
+ size_t Insert(const AstValue* heap_number);
+ size_t Insert(const Scope* scope);
+#define INSERT_ENTRY(NAME, ...) size_t Insert##NAME();
+ SINGLETON_CONSTANT_ENTRY_TYPES(INSERT_ENTRY)
+#undef INSERT_ENTRY
- // Allocates an empty entry and returns the array index associated with the
- // reservation. Entry can be inserted by calling InsertReservedEntry().
- size_t AllocateEntry();
+ // Inserts an empty entry and returns the array index associated with the
+ // reservation. The entry's handle value can be inserted by calling
+ // SetDeferredAt().
+ size_t InsertDeferred();
- // Inserts the given object into an allocated entry.
- void InsertAllocatedEntry(size_t index, Handle<Object> object);
+ // Sets the deferred value at |index| to |object|.
+ void SetDeferredAt(size_t index, Handle<Object> object);
// Creates a reserved entry in the constant pool and returns
// the size of the operand that'll be required to hold the entry
@@ -71,7 +88,60 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
private:
typedef uint32_t index_t;
- index_t AllocateIndex(Handle<Object> object);
+ class Entry {
+ private:
+ enum class Tag : uint8_t;
+
+ public:
+ explicit Entry(Smi* smi) : smi_(smi), tag_(Tag::kSmi) {}
+ explicit Entry(const AstRawString* raw_string)
+ : raw_string_(raw_string), tag_(Tag::kRawString) {}
+ explicit Entry(const AstValue* heap_number)
+ : heap_number_(heap_number), tag_(Tag::kHeapNumber) {}
+ explicit Entry(const Scope* scope) : scope_(scope), tag_(Tag::kScope) {}
+
+#define CONSTRUCT_ENTRY(NAME, LOWER_NAME) \
+ static Entry NAME() { return Entry(Tag::k##NAME); }
+ SINGLETON_CONSTANT_ENTRY_TYPES(CONSTRUCT_ENTRY)
+#undef CONSTRUCT_ENTRY
+
+ static Entry Deferred() { return Entry(Tag::kDeferred); }
+
+ bool IsDeferred() const { return tag_ == Tag::kDeferred; }
+
+ void SetDeferred(Handle<Object> handle) {
+ DCHECK(tag_ == Tag::kDeferred);
+ tag_ = Tag::kHandle;
+ handle_ = handle;
+ }
+
+ Handle<Object> ToHandle(Isolate* isolate) const;
+
+ private:
+ explicit Entry(Tag tag) : tag_(tag) {}
+
+ union {
+ Handle<Object> handle_;
+ Smi* smi_;
+ const AstRawString* raw_string_;
+ const AstValue* heap_number_;
+ const Scope* scope_;
+ };
+
+ enum class Tag : uint8_t {
+ kDeferred,
+ kHandle,
+ kSmi,
+ kRawString,
+ kHeapNumber,
+ kScope,
+#define ENTRY_TAG(NAME, ...) k##NAME,
+ SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_TAG)
+#undef ENTRY_TAG
+ } tag_;
+ };
+
+ index_t AllocateIndex(Entry constant_entry);
index_t AllocateReservedEntry(Smi* value);
struct ConstantArraySlice final : public ZoneObject {
@@ -79,12 +149,12 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
OperandSize operand_size);
void Reserve();
void Unreserve();
- size_t Allocate(Handle<Object> object);
- Handle<Object> At(size_t index) const;
- void InsertAt(size_t index, Handle<Object> object);
+ size_t Allocate(Entry entry);
+ Entry& At(size_t index);
+ const Entry& At(size_t index) const;
#if DEBUG
- void CheckAllElementsAreUnique() const;
+ void CheckAllElementsAreUnique(Isolate* isolate) const;
#endif
inline size_t available() const { return capacity() - reserved() - size(); }
@@ -100,7 +170,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
const size_t capacity_;
size_t reserved_;
OperandSize operand_size_;
- ZoneVector<Handle<Object>> constants_;
+ ZoneVector<Entry> constants_;
DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
};
@@ -108,16 +178,19 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
ConstantArraySlice* IndexToSlice(size_t index) const;
ConstantArraySlice* OperandSizeToSlice(OperandSize operand_size) const;
- Handle<Object> the_hole_value() const { return the_hole_value_; }
-
ConstantArraySlice* idx_slice_[3];
- base::TemplateHashMapImpl<Address, index_t, base::KeyEqualityMatcher<Address>,
+ base::TemplateHashMapImpl<intptr_t, index_t,
+ base::KeyEqualityMatcher<intptr_t>,
ZoneAllocationPolicy>
constants_map_;
ZoneMap<Smi*, index_t> smi_map_;
ZoneVector<std::pair<Smi*, index_t>> smi_pairs_;
+
+#define SINGLETON_ENTRY_FIELD(NAME, LOWER_NAME) int LOWER_NAME##_;
+ SINGLETON_CONSTANT_ENTRY_TYPES(SINGLETON_ENTRY_FIELD)
+#undef SINGLETON_ENTRY_FIELD
+
Zone* zone_;
- Handle<Object> the_hole_value_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 50061949dc..b9bfc5be17 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -5,7 +5,6 @@
#ifndef V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
#define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
-#include "src/handles.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/zone/zone-containers.h"
@@ -13,6 +12,8 @@
namespace v8 {
namespace internal {
+template <typename T>
+class Handle;
class HandlerTable;
class Isolate;
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 5eb992c1da..557ad77fb5 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -14,6 +14,7 @@
#include "src/interpreter/interpreter.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -31,14 +32,23 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
operand_scale_(operand_scale),
bytecode_offset_(this, MachineType::PointerRepresentation()),
interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
+ bytecode_array_(this, MachineRepresentation::kTagged),
+ dispatch_table_(this, MachineType::PointerRepresentation()),
accumulator_(this, MachineRepresentation::kTagged),
accumulator_use_(AccumulatorUse::kNone),
made_call_(false),
+ reloaded_frame_ptr_(false),
+ saved_bytecode_offset_(false),
disable_stack_check_across_call_(false),
stack_pointer_before_call_(nullptr) {
accumulator_.Bind(Parameter(InterpreterDispatchDescriptor::kAccumulator));
bytecode_offset_.Bind(
Parameter(InterpreterDispatchDescriptor::kBytecodeOffset));
+ bytecode_array_.Bind(
+ Parameter(InterpreterDispatchDescriptor::kBytecodeArray));
+ dispatch_table_.Bind(
+ Parameter(InterpreterDispatchDescriptor::kDispatchTable));
+
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
}
@@ -57,6 +67,10 @@ InterpreterAssembler::~InterpreterAssembler() {
Node* InterpreterAssembler::GetInterpretedFramePointer() {
if (!interpreted_frame_pointer_.IsBound()) {
interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+ } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
+ !reloaded_frame_ptr_) {
+ interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+ reloaded_frame_ptr_ = true;
}
return interpreted_frame_pointer_.value();
}
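
The frame pointer now behaves as a cache that must be refreshed once after any call, since a callee (for example the debugger's frame dropper) can replace the frame. A standalone sketch of the reload-once policy (plain C++, invented names, not V8 code):

    struct Cache {
      void* ptr = nullptr;
      bool reloaded_after_call = false;
    };

    void* GetFramePointer(Cache* c, bool made_call, void* (*load)()) {
      if (c->ptr == nullptr) {
        c->ptr = load();  // first use: load lazily
      } else if (made_call && !c->reloaded_after_call) {
        c->ptr = load();  // a callee may have moved or patched the frame
        c->reloaded_after_call = true;
      }
      return c->ptr;
    }
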
@@ -151,21 +165,33 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
}
Node* InterpreterAssembler::BytecodeOffset() {
+ if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
+ (bytecode_offset_.value() ==
+ Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
+ bytecode_offset_.Bind(LoadAndUntagRegister(Register::bytecode_offset()));
+ }
return bytecode_offset_.value();
}
Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
- if (made_call_) {
- // If we have made a call, restore bytecode array from stack frame in case
- // the debugger has swapped us to the patched debugger bytecode array.
- return LoadRegister(Register::bytecode_array());
- } else {
- return Parameter(InterpreterDispatchDescriptor::kBytecodeArray);
+ // Force a re-load of the bytecode array after every call in case the debugger
+ // has been activated.
+ if (made_call_ &&
+ (bytecode_array_.value() ==
+ Parameter(InterpreterDispatchDescriptor::kBytecodeArray))) {
+ bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
}
+ return bytecode_array_.value();
}
Node* InterpreterAssembler::DispatchTableRawPointer() {
- return Parameter(InterpreterDispatchDescriptor::kDispatchTable);
+ if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
+ (dispatch_table_.value() ==
+ Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
+ dispatch_table_.Bind(ExternalConstant(
+ ExternalReference::interpreter_dispatch_table_address(isolate())));
+ }
+ return dispatch_table_.value();
}
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
@@ -187,6 +213,11 @@ Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
RegisterFrameOffset(reg_index));
}
+Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
+ return LoadAndUntagSmi(GetInterpretedFramePointer(), reg.ToOperand()
+ << kPointerSizeLog2);
+}
+
Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
return StoreNoWriteBarrier(
MachineRepresentation::kTagged, GetInterpretedFramePointer(),
@@ -199,6 +230,12 @@ Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
RegisterFrameOffset(reg_index), value);
}
+Node* InterpreterAssembler::StoreAndTagRegister(compiler::Node* value,
+ Register reg) {
+ int offset = reg.ToOperand() << kPointerSizeLog2;
+ return StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
+}
+
Node* InterpreterAssembler::NextRegister(Node* reg_index) {
// Register indexes are negative, so the next index is minus one.
return IntPtrAdd(reg_index, IntPtrConstant(-1));
@@ -395,6 +432,10 @@ Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
+Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
+ return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
+}
+
Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
DCHECK_EQ(OperandType::kImm,
Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -463,14 +504,25 @@ Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
Node* InterpreterAssembler::LoadFeedbackVector() {
Node* function = LoadRegister(Register::function_closure());
- Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
- Node* vector =
- LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
+ Node* cell = LoadObjectField(function, JSFunction::kFeedbackVectorOffset);
+ Node* vector = LoadObjectField(cell, Cell::kValueOffset);
return vector;
}
+void InterpreterAssembler::SaveBytecodeOffset() {
+ DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+ StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
+ saved_bytecode_offset_ = true;
+}
+
void InterpreterAssembler::CallPrologue() {
- StoreRegister(SmiTag(BytecodeOffset()), Register::bytecode_offset());
+ if (!saved_bytecode_offset_) {
+ // If there are multiple calls in the bytecode handler, you need to spill
+ // before each of them, unless SaveBytecodeOffset has explicitly been called
+ // in a path that dominates _all_ of those calls. Therefore don't set
+ // saved_bytecode_offset_ to true or call SaveBytecodeOffset.
+ StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
+ }
if (FLAG_debug_code && !disable_stack_check_across_call_) {
DCHECK(stack_pointer_before_call_ == nullptr);
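
The comment above encodes a dominance rule: CallPrologue may skip the spill only because an explicit SaveBytecodeOffset, when emitted, sits on a path that covers every later call in the handler. A standalone sketch of that rule (plain C++, invented names, not V8 code):

    struct Frame { int bytecode_offset = 0; };

    class HandlerState {
     public:
      void SaveOffsetUpFront(Frame* f, int offset) {
        f->bytecode_offset = offset;
        saved_ = true;  // safe: this save dominates all calls
      }
      void CallPrologue(Frame* f, int offset) {
        // A spill inside one branch would not cover calls on other paths,
        // so this lazy spill never sets saved_.
        if (!saved_) f->bytecode_offset = offset;
      }
     private:
      bool saved_ = false;
    };
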
@@ -515,6 +567,8 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
// computed, meaning that it can't appear to be a pointer. If the low bit is
// 0, then hash is computed, but the 0 bit prevents the field from appearing
// to be a pointer.
+ DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+ DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
WeakCell::kValueOffset &&
@@ -528,7 +582,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* feedback_element = LoadFixedArrayElement(feedback_vector, slot_id);
Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
Node* is_monomorphic = WordEqual(function, feedback_value);
- GotoUnless(is_monomorphic, &extra_checks);
+ GotoIfNot(is_monomorphic, &extra_checks);
// The compare above could have been a SMI/SMI comparison. Guard against
// this convincing us that we have a monomorphic JSFunction.
@@ -542,7 +596,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
// Call using call function builtin.
Callable callable = CodeFactory::InterpreterPushArgsAndCall(
- isolate(), tail_call_mode, CallableType::kJSFunction);
+ isolate(), tail_call_mode, InterpreterPushArgsMode::kJSFunction);
Node* code_target = HeapConstant(callable.code());
Node* ret_value = CallStub(callable.descriptor(), code_target, context,
arg_count, first_arg, function);
@@ -563,14 +617,14 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
GotoIf(is_megamorphic, &call);
Comment("check if it is an allocation site");
- GotoUnless(IsAllocationSiteMap(LoadMap(feedback_element)),
- &check_initialized);
+ GotoIfNot(IsAllocationSiteMap(LoadMap(feedback_element)),
+ &check_initialized);
// If it is not the Array() function, mark megamorphic.
Node* context_slot = LoadContextElement(LoadNativeContext(context),
Context::ARRAY_FUNCTION_INDEX);
Node* is_array_function = WordEqual(context_slot, function);
- GotoUnless(is_array_function, &mark_megamorphic);
+ GotoIfNot(is_array_function, &mark_megamorphic);
// It is a monomorphic Array function. Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -592,7 +646,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* is_uninitialized = WordEqual(
feedback_element,
HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
- GotoUnless(is_uninitialized, &mark_megamorphic);
+ GotoIfNot(is_uninitialized, &mark_megamorphic);
Comment("handle_unitinitialized");
// If it is not a JSFunction mark it as megamorphic.
@@ -603,7 +657,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* instance_type = LoadInstanceType(function);
Node* is_js_function =
Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
- GotoUnless(is_js_function, &mark_megamorphic);
+ GotoIfNot(is_js_function, &mark_megamorphic);
// Check if it is the Array() function.
Node* context_slot = LoadContextElement(LoadNativeContext(context),
@@ -616,7 +670,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
LoadObjectField(function, JSFunction::kContextOffset));
Node* is_same_native_context =
WordEqual(native_context, LoadNativeContext(context));
- GotoUnless(is_same_native_context, &mark_megamorphic);
+ GotoIfNot(is_same_native_context, &mark_megamorphic);
CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id),
function);
@@ -657,7 +711,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
// Call using call builtin.
Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
- isolate(), tail_call_mode, CallableType::kAny);
+ isolate(), tail_call_mode, InterpreterPushArgsMode::kOther);
Node* code_target_call = HeapConstant(callable_call.code());
Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
context, arg_count, first_arg, function);
@@ -672,18 +726,33 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Node* first_arg, Node* arg_count,
TailCallMode tail_call_mode) {
+ DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+ DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
+ Callable callable = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, InterpreterPushArgsMode::kOther);
+ Node* code_target = HeapConstant(callable.code());
+
+ return CallStub(callable.descriptor(), code_target, context, arg_count,
+ first_arg, function);
+}
+
+Node* InterpreterAssembler::CallJSWithSpread(Node* function, Node* context,
+ Node* first_arg, Node* arg_count) {
+ DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
Callable callable = CodeFactory::InterpreterPushArgsAndCall(
- isolate(), tail_call_mode, CallableType::kAny);
+ isolate(), TailCallMode::kDisallow,
+ InterpreterPushArgsMode::kWithFinalSpread);
Node* code_target = HeapConstant(callable.code());
return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function);
}
-Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
- Node* new_target, Node* first_arg,
- Node* arg_count, Node* slot_id,
- Node* feedback_vector) {
+Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
+ Node* new_target, Node* first_arg,
+ Node* arg_count, Node* slot_id,
+ Node* feedback_vector) {
+ DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
Variable return_value(this, MachineRepresentation::kTagged);
Variable allocation_feedback(this, MachineRepresentation::kTagged);
Label call_construct_function(this, &allocation_feedback),
@@ -702,7 +771,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Node* instance_type = LoadInstanceType(constructor);
Node* is_js_function =
Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
- GotoUnless(is_js_function, &call_construct);
+ GotoIfNot(is_js_function, &call_construct);
// Check if it is a monomorphic constructor.
Node* feedback_element = LoadFixedArrayElement(feedback_vector, slot_id);
@@ -713,10 +782,10 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Bind(&call_construct_function);
{
- Comment("call using callConstructFunction");
+ Comment("call using ConstructFunction");
IncrementCallCount(feedback_vector, slot_id);
Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
- isolate(), CallableType::kJSFunction);
+ isolate(), InterpreterPushArgsMode::kJSFunction);
return_value.Bind(CallStub(callable_function.descriptor(),
HeapConstant(callable_function.code()), context,
arg_count, new_target, constructor,
@@ -739,7 +808,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Comment("check if weak cell");
Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
LoadRoot(Heap::kWeakCellMapRootIndex));
- GotoUnless(is_weak_cell, &check_allocation_site);
+ GotoIfNot(is_weak_cell, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become
// monomorphic.
@@ -753,13 +822,13 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Node* is_allocation_site =
WordEqual(LoadObjectField(feedback_element, 0),
LoadRoot(Heap::kAllocationSiteMapRootIndex));
- GotoUnless(is_allocation_site, &check_initialized);
+ GotoIfNot(is_allocation_site, &check_initialized);
// Make sure the function is the Array() function.
Node* context_slot = LoadContextElement(LoadNativeContext(context),
Context::ARRAY_FUNCTION_INDEX);
Node* is_array_function = WordEqual(context_slot, constructor);
- GotoUnless(is_array_function, &mark_megamorphic);
+ GotoIfNot(is_array_function, &mark_megamorphic);
allocation_feedback.Bind(feedback_element);
Goto(&call_construct_function);
@@ -817,9 +886,9 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Bind(&call_construct);
{
- Comment("call using callConstruct builtin");
+ Comment("call using Construct builtin");
Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
- isolate(), CallableType::kAny);
+ isolate(), InterpreterPushArgsMode::kOther);
Node* code_target = HeapConstant(callable.code());
return_value.Bind(CallStub(callable.descriptor(), code_target, context,
arg_count, new_target, constructor,
@@ -831,9 +900,28 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
return return_value.value();
}
+Node* InterpreterAssembler::ConstructWithSpread(Node* constructor,
+ Node* context, Node* new_target,
+ Node* first_arg,
+ Node* arg_count) {
+ DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+ Variable return_value(this, MachineRepresentation::kTagged);
+ Comment("call using ConstructWithSpread");
+ Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
+ isolate(), InterpreterPushArgsMode::kWithFinalSpread);
+ Node* code_target = HeapConstant(callable.code());
+ return_value.Bind(CallStub(callable.descriptor(), code_target, context,
+ arg_count, new_target, constructor,
+ UndefinedConstant(), first_arg));
+
+ return return_value.value();
+}
+
Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
Node* first_arg, Node* arg_count,
int result_size) {
+ DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+ DCHECK(Bytecodes::IsCallRuntime(bytecode_));
Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
Node* code_target = HeapConstant(callable.code());
@@ -852,10 +940,7 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
arg_count, first_arg, function_entry);
}
-void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
- // TODO(rmcilroy): It might be worthwhile to only update the budget for
- // backwards branches. Those are distinguishable by the {JumpLoop} bytecode.
-
+void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
Node* budget_offset =
IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
@@ -864,7 +949,11 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
Variable new_budget(this, MachineRepresentation::kWord32);
Node* old_budget =
Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
- new_budget.Bind(Int32Add(old_budget, weight));
+ if (backward) {
+ new_budget.Bind(Int32Sub(old_budget, weight));
+ } else {
+ new_budget.Bind(Int32Add(old_budget, weight));
+ }
Node* condition =
Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
Branch(condition, &ok, &interrupt_check);
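
With this change only backward jumps (loop back-edges) consume budget, while forward weights still replenish it. A standalone sketch of the arithmetic and the check (plain C++, not V8 code):

    #include <cstdint>

    // Returns true when the budget is exhausted and the deferred interrupt
    // check must run; mirrors the Int32GreaterThanOrEqual branch above.
    bool UpdateInterruptBudget(int32_t* budget, int32_t weight,
                               bool backward) {
      *budget = backward ? *budget - weight : *budget + weight;
      return *budget < 0;
    }
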
@@ -892,24 +981,31 @@ Node* InterpreterAssembler::Advance(int delta) {
return Advance(IntPtrConstant(delta));
}
-Node* InterpreterAssembler::Advance(Node* delta) {
+Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
}
- Node* next_offset = IntPtrAdd(BytecodeOffset(), delta);
+ Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
+ : IntPtrAdd(BytecodeOffset(), delta);
bytecode_offset_.Bind(next_offset);
return next_offset;
}
-Node* InterpreterAssembler::Jump(Node* delta) {
+Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
- UpdateInterruptBudget(TruncateWordToWord32(delta));
- Node* new_bytecode_offset = Advance(delta);
+ UpdateInterruptBudget(TruncateWordToWord32(delta), backward);
+ Node* new_bytecode_offset = Advance(delta, backward);
Node* target_bytecode = LoadBytecode(new_bytecode_offset);
return DispatchToBytecode(target_bytecode, new_bytecode_offset);
}
+Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); }
+
+Node* InterpreterAssembler::JumpBackward(Node* delta) {
+ return Jump(delta, true);
+}
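
Jump deltas are now always non-negative; the direction lives in the bytecode itself, so JumpBackward subtracts where Jump adds. A one-function sketch (plain C++, not V8 code):

    #include <cstddef>

    std::size_t Advance(std::size_t offset, std::size_t delta,
                        bool backward) {
      // Direction is carried by the opcode, not by the sign of the delta.
      return backward ? offset - delta : offset + delta;
    }
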
+
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
Label match(this), no_match(this);
@@ -976,6 +1072,7 @@ void InterpreterAssembler::InlineStar() {
Node* InterpreterAssembler::Dispatch() {
Comment("========= Dispatch");
+ DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
Node* target_offset = Advance();
Node* target_bytecode = LoadBytecode(target_offset);
@@ -1023,6 +1120,7 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
// Indices 0-255 correspond to bytecodes with operand_scale == 0
// Indices 256-511 correspond to bytecodes with operand_scale == 1
// Indices 512-767 correspond to bytecodes with operand_scale == 2
+ DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
Node* next_bytecode_offset = Advance(1);
Node* next_bytecode = LoadBytecode(next_bytecode_offset);
@@ -1145,7 +1243,7 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
Node* profiling_weight =
Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
TruncateWordToWord32(BytecodeOffset()));
- UpdateInterruptBudget(profiling_weight);
+ UpdateInterruptBudget(profiling_weight, false);
}
Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
@@ -1181,6 +1279,26 @@ void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
Bind(&ok);
}
+void InterpreterAssembler::MaybeDropFrames(Node* context) {
+ Node* restart_fp_address =
+ ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));
+
+ Node* restart_fp = Load(MachineType::Pointer(), restart_fp_address);
+ Node* null = IntPtrConstant(0);
+
+ Label ok(this), drop_frames(this);
+ Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);
+
+ Bind(&drop_frames);
+ // We don't expect this call to return since the frame dropper tears down
+ // the stack and jumps into the function on the target frame to restart it.
+ CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
+ Abort(kUnexpectedReturnFromFrameDropper);
+ Goto(&ok);
+
+ Bind(&ok);
+}
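
The shape of MaybeDropFrames is a cheap null check on a pointer the debugger publishes, with the expensive teardown kept off the common path. A standalone sketch (plain C++, invented names, not V8 code):

    #include <cstdint>

    // A nonzero restart frame pointer means the current frames must be
    // torn down and the function on the target frame restarted.
    void MaybeDropFrames(uintptr_t restart_fp,
                         void (*drop_frames)(uintptr_t)) {
      if (restart_fp == 0) return;  // common case: debugger inactive
      drop_frames(restart_fp);      // expected never to return
      // Falling through here would be a bug (cf. the Abort above).
    }
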
+
void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
@@ -1255,7 +1373,7 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
Bind(&loop);
{
Node* index = var_index.value();
- GotoUnless(UintPtrLessThan(index, register_count), &done_loop);
+ GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
Node* value = LoadRegister(reg_index);
@@ -1288,7 +1406,7 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
Bind(&loop);
{
Node* index = var_index.value();
- GotoUnless(UintPtrLessThan(index, register_count), &done_loop);
+ GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
Node* value = LoadFixedArrayElement(array, index);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 092f4858de..1317f377f4 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -39,6 +39,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
// in the current bytecode.
compiler::Node* BytecodeOperandUImm(int operand_index);
+ // Returns the word-size unsigned immediate for bytecode operand
+ // |operand_index| in the current bytecode.
+ compiler::Node* BytecodeOperandUImmWord(int operand_index);
// Returns the 32-bit signed immediate for bytecode operand |operand_index|
// in the current bytecode.
compiler::Node* BytecodeOperandImm(int operand_index);
@@ -85,9 +88,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Loads from and stores to the interpreter register file.
compiler::Node* LoadRegister(Register reg);
compiler::Node* LoadRegister(compiler::Node* reg_index);
+ compiler::Node* LoadAndUntagRegister(Register reg);
compiler::Node* StoreRegister(compiler::Node* value, Register reg);
compiler::Node* StoreRegister(compiler::Node* value,
compiler::Node* reg_index);
+ compiler::Node* StoreAndTagRegister(compiler::Node* value, Register reg);
// Returns the next consecutive register.
compiler::Node* NextRegister(compiler::Node* reg_index);
@@ -129,18 +134,34 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* first_arg, compiler::Node* arg_count,
TailCallMode tail_call_mode);
+ // Call JSFunction or Callable |function| with |arg_count| arguments
+ // (not including receiver) and the first argument located at
+ // |first_arg|. The final argument is always a spread.
+ compiler::Node* CallJSWithSpread(compiler::Node* function,
+ compiler::Node* context,
+ compiler::Node* first_arg,
+ compiler::Node* arg_count);
+
// Call constructor |constructor| with |arg_count| arguments (not
// including receiver) and the first argument located at
// |first_arg|. The |new_target| is the same as the
// |constructor| for the new keyword, but differs for the super
// keyword.
- compiler::Node* CallConstruct(compiler::Node* constructor,
- compiler::Node* context,
- compiler::Node* new_target,
- compiler::Node* first_arg,
- compiler::Node* arg_count,
- compiler::Node* slot_id,
- compiler::Node* feedback_vector);
+ compiler::Node* Construct(compiler::Node* constructor,
+ compiler::Node* context, compiler::Node* new_target,
+ compiler::Node* first_arg,
+ compiler::Node* arg_count, compiler::Node* slot_id,
+ compiler::Node* feedback_vector);
+
+ // Call constructor |constructor| with |arg_count| arguments (not including
+ // receiver) and the first argument located at |first_arg|. The last argument
+ // is always a spread. The |new_target| is the same as the |constructor| for
+ // the new keyword, but differs for the super keyword.
+ compiler::Node* ConstructWithSpread(compiler::Node* constructor,
+ compiler::Node* context,
+ compiler::Node* new_target,
+ compiler::Node* first_arg,
+ compiler::Node* arg_count);
// Call runtime function with |arg_count| arguments and the first argument
// located at |first_arg|.
@@ -149,15 +170,18 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* first_arg,
compiler::Node* arg_count, int return_size = 1);
- // Jump relative to the current bytecode by |jump_offset|.
+ // Jump forward relative to the current bytecode by the |jump_offset|.
compiler::Node* Jump(compiler::Node* jump_offset);
- // Jump relative to the current bytecode by |jump_offset| if the
+ // Jump backward relative to the current bytecode by the |jump_offset|.
+ compiler::Node* JumpBackward(compiler::Node* jump_offset);
+
+ // Jump forward relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are equal.
void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
compiler::Node* jump_offset);
- // Jump relative to the current bytecode by |jump_offset| if the
+ // Jump forward relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are not equal.
void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
compiler::Node* jump_offset);
@@ -193,9 +217,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
BailoutReason bailout_reason);
+ // Dispatch to frame dropper trampoline if necessary.
+ void MaybeDropFrames(compiler::Node* context);
+
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
compiler::Node* BytecodeOffset();
+ // Save the bytecode offset to the interpreter frame.
+ void SaveBytecodeOffset();
+
protected:
Bytecode bytecode() const { return bytecode_; }
static bool TargetSupportsUnalignedAccess();
@@ -227,9 +257,10 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
- // Updates the bytecode array's interrupt budget by a 32-bit signed |weight|
- // and calls Runtime::kInterrupt if counter reaches zero.
- void UpdateInterruptBudget(compiler::Node* weight);
+ // Updates the bytecode array's interrupt budget by a 32-bit unsigned
+ // |weight| and calls Runtime::kInterrupt if the budget reaches zero. If
+ // |backward|, the interrupt budget is decremented, otherwise incremented.
+ void UpdateInterruptBudget(compiler::Node* weight, bool backward);
// Returns the offset of register |index| relative to RegisterFilePointer().
compiler::Node* RegisterFrameOffset(compiler::Node* index);
@@ -260,7 +291,12 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* BytecodeUnsignedOperand(int operand_index,
OperandSize operand_size);
- // Jump relative to the current bytecode by |jump_offset| if the
+ // Jump relative to the current bytecode by the |jump_offset|. If |backward|,
+ // then jump backward (subtract the offset), otherwise jump forward (add the
+ // offset). Helper function for Jump and JumpBackward.
+ compiler::Node* Jump(compiler::Node* jump_offset, bool backward);
+
+ // Jump forward relative to the current bytecode by |jump_offset| if the
// |condition| is true. Helper function for JumpIfWordEqual and
// JumpIfWordNotEqual.
void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
@@ -272,7 +308,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Updates and returns BytecodeOffset() advanced by delta bytecodes.
// Traces the exit of the current bytecode.
compiler::Node* Advance(int delta);
- compiler::Node* Advance(compiler::Node* delta);
+ compiler::Node* Advance(compiler::Node* delta, bool backward = false);
// Load the bytecode at |bytecode_offset|.
compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);
@@ -304,9 +340,13 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
OperandScale operand_scale_;
CodeStubAssembler::Variable bytecode_offset_;
CodeStubAssembler::Variable interpreted_frame_pointer_;
+ CodeStubAssembler::Variable bytecode_array_;
+ CodeStubAssembler::Variable dispatch_table_;
CodeStubAssembler::Variable accumulator_;
AccumulatorUse accumulator_use_;
bool made_call_;
+ bool reloaded_frame_ptr_;
+ bool saved_bytecode_offset_;
bool disable_stack_check_across_call_;
compiler::Node* stack_pointer_before_call_;
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.cc b/deps/v8/src/interpreter/interpreter-intrinsics.cc
index a2820fb128..78de42b634 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.cc
@@ -5,6 +5,7 @@
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/code-factory.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -242,18 +243,6 @@ Node* IntrinsicsHelper::HasProperty(Node* input, Node* arg_count,
CodeFactory::HasProperty(isolate()));
}
-Node* IntrinsicsHelper::NumberToString(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsStubCall(input, context,
- CodeFactory::NumberToString(isolate()));
-}
-
-Node* IntrinsicsHelper::RegExpExec(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsStubCall(input, context,
- CodeFactory::RegExpExec(isolate()));
-}
-
Node* IntrinsicsHelper::SubString(Node* input, Node* arg_count, Node* context) {
return IntrinsicAsStubCall(input, context, CodeFactory::SubString(isolate()));
}
@@ -293,7 +282,7 @@ Node* IntrinsicsHelper::Call(Node* args_reg, Node* arg_count, Node* context) {
if (FLAG_debug_code) {
InterpreterAssembler::Label arg_count_positive(assembler_);
Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0));
- __ GotoUnless(comparison, &arg_count_positive);
+ __ GotoIfNot(comparison, &arg_count_positive);
__ Abort(kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&arg_count_positive);
__ Bind(&arg_count_positive);
@@ -304,84 +293,43 @@ Node* IntrinsicsHelper::Call(Node* args_reg, Node* arg_count, Node* context) {
return result;
}
-Node* IntrinsicsHelper::ValueOf(Node* args_reg, Node* arg_count,
+Node* IntrinsicsHelper::ClassOf(Node* args_reg, Node* arg_count,
Node* context) {
+ Node* value = __ LoadRegister(args_reg);
+ return __ ClassOf(value);
+}
+
+Node* IntrinsicsHelper::CreateAsyncFromSyncIterator(Node* args_reg,
+ Node* arg_count,
+ Node* context) {
+ InterpreterAssembler::Label not_receiver(
+ assembler_, InterpreterAssembler::Label::kDeferred);
+ InterpreterAssembler::Label done(assembler_);
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
- InterpreterAssembler::Label done(assembler_);
- Node* object = __ LoadRegister(args_reg);
- return_value.Bind(object);
+ Node* sync_iterator = __ LoadRegister(args_reg);
- // If the object is a smi return the object.
- __ GotoIf(__ TaggedIsSmi(object), &done);
+ __ GotoIf(__ TaggedIsSmi(sync_iterator), &not_receiver);
+ __ GotoIfNot(__ IsJSReceiver(sync_iterator), &not_receiver);
- // If the object is not a value type, return the object.
- Node* condition =
- CompareInstanceType(object, JS_VALUE_TYPE, kInstanceTypeEqual);
- __ GotoUnless(condition, &done);
+ Node* const native_context = __ LoadNativeContext(context);
+ Node* const map = __ LoadContextElement(
+ native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX);
+ Node* const iterator = __ AllocateJSObjectFromMap(map);
- // If the object is a value type, return the value field.
- return_value.Bind(__ LoadObjectField(object, JSValue::kValueOffset));
- __ Goto(&done);
+ __ StoreObjectFieldNoWriteBarrier(
+ iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator);
- __ Bind(&done);
- return return_value.value();
-}
-
-Node* IntrinsicsHelper::ClassOf(Node* args_reg, Node* arg_count,
- Node* context) {
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
- InterpreterAssembler::Label done(assembler_), null(assembler_),
- function(assembler_), non_function_constructor(assembler_);
-
- Node* object = __ LoadRegister(args_reg);
-
- // If the object is not a JSReceiver, we return null.
- __ GotoIf(__ TaggedIsSmi(object), &null);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Node* is_js_receiver = CompareInstanceType(object, FIRST_JS_RECEIVER_TYPE,
- kInstanceTypeGreaterThanOrEqual);
- __ GotoUnless(is_js_receiver, &null);
-
- // Return 'Function' for JSFunction and JSBoundFunction objects.
- Node* is_function = CompareInstanceType(object, FIRST_FUNCTION_TYPE,
- kInstanceTypeGreaterThanOrEqual);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- __ GotoIf(is_function, &function);
-
- // Check if the constructor in the map is a JS function.
- Node* constructor = __ LoadMapConstructor(__ LoadMap(object));
- Node* constructor_is_js_function =
- CompareInstanceType(constructor, JS_FUNCTION_TYPE, kInstanceTypeEqual);
- __ GotoUnless(constructor_is_js_function, &non_function_constructor);
-
- // Grab the instance class name from the constructor function.
- Node* shared =
- __ LoadObjectField(constructor, JSFunction::kSharedFunctionInfoOffset);
- return_value.Bind(
- __ LoadObjectField(shared, SharedFunctionInfo::kInstanceClassNameOffset));
+ return_value.Bind(iterator);
__ Goto(&done);
- // Non-JS objects have class null.
- __ Bind(&null);
+ __ Bind(&not_receiver);
{
- return_value.Bind(__ LoadRoot(Heap::kNullValueRootIndex));
- __ Goto(&done);
- }
-
- // Functions have class 'Function'.
- __ Bind(&function);
- {
- return_value.Bind(__ LoadRoot(Heap::kFunction_stringRootIndex));
- __ Goto(&done);
- }
+ return_value.Bind(
+ __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context));
- // Objects with a non-function constructor have class 'Object'.
- __ Bind(&non_function_constructor);
- {
- return_value.Bind(__ LoadRoot(Heap::kObject_stringRootIndex));
+ // Unreachable due to the Throw in runtime call.
__ Goto(&done);
}
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 825e2b9a98..502a2f7b38 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -23,25 +23,23 @@ namespace interpreter {
// List of supported intrinsics, with upper case name, lower case name and
// expected number of arguments (-1 denoting a variable argument count).
-#define INTRINSICS_LIST(V) \
- V(Call, call, -1) \
- V(ClassOf, class_of, 1) \
- V(CreateIterResultObject, create_iter_result_object, 2) \
- V(HasProperty, has_property, 2) \
- V(IsArray, is_array, 1) \
- V(IsJSProxy, is_js_proxy, 1) \
- V(IsJSReceiver, is_js_receiver, 1) \
- V(IsSmi, is_smi, 1) \
- V(IsTypedArray, is_typed_array, 1) \
- V(NumberToString, number_to_string, 1) \
- V(RegExpExec, reg_exp_exec, 4) \
- V(SubString, sub_string, 3) \
- V(ToString, to_string, 1) \
- V(ToLength, to_length, 1) \
- V(ToInteger, to_integer, 1) \
- V(ToNumber, to_number, 1) \
- V(ToObject, to_object, 1) \
- V(ValueOf, value_of, 1)
+#define INTRINSICS_LIST(V) \
+ V(Call, call, -1) \
+ V(ClassOf, class_of, 1) \
+ V(CreateIterResultObject, create_iter_result_object, 2) \
+ V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
+ V(HasProperty, has_property, 2) \
+ V(IsArray, is_array, 1) \
+ V(IsJSProxy, is_js_proxy, 1) \
+ V(IsJSReceiver, is_js_receiver, 1) \
+ V(IsSmi, is_smi, 1) \
+ V(IsTypedArray, is_typed_array, 1) \
+ V(SubString, sub_string, 3) \
+ V(ToString, to_string, 1) \
+ V(ToLength, to_length, 1) \
+ V(ToInteger, to_integer, 1) \
+ V(ToNumber, to_number, 1) \
+ V(ToObject, to_object, 1)
class IntrinsicsHelper {
public:
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index cacf6818c8..5db69e4f67 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -8,17 +8,23 @@
#include <memory>
#include "src/ast/prettyprinter.h"
+#include "src/builtins/builtins-arguments.h"
#include "src/builtins/builtins-constructor.h"
+#include "src/builtins/builtins-object.h"
#include "src/code-factory.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
+#include "src/counters.h"
+#include "src/debug/debug.h"
#include "src/factory.h"
+#include "src/ic/accessor-assembler.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/log.h"
+#include "src/objects-inl.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -76,6 +82,7 @@ class InterpreterCompilationJob final : public CompilationJob {
BytecodeGenerator generator_;
RuntimeCallStats* runtime_call_stats_;
RuntimeCallCounter background_execute_counter_;
+ bool print_bytecode_;
DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
@@ -135,6 +142,9 @@ void Interpreter::InstallBytecodeHandler(Zone* zone, Bytecode bytecode,
isolate_, zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER),
Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode));
InterpreterAssembler assembler(&state, bytecode, operand_scale);
+ if (Bytecodes::MakesCallAlongCriticalPath(bytecode)) {
+ assembler.SaveBytecodeOffset();
+ }
(this->*generator)(&assembler);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
@@ -191,15 +201,33 @@ int Interpreter::InterruptBudget() {
return FLAG_interrupt_budget * kCodeSizeMultiplier;
}
+namespace {
+
+bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
+ if (!FLAG_print_bytecode) return false;
+
+ // Checks whether function passed the filter.
+ if (shared->is_toplevel()) {
+ Vector<const char> filter = CStrVector(FLAG_print_bytecode_filter);
+ return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
+ } else {
+ return shared->PassesFilter(FLAG_print_bytecode_filter);
+ }
+}
+
+} // namespace
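
The filter semantics above differ by code kind: top-level code matches only the empty or "*" filter, while named functions go through the shared-function-info filter. A standalone sketch of the same decision (plain C++, invented names; the string comparison stands in for SharedFunctionInfo::PassesFilter):

    #include <cstring>
    #include <string>

    bool ShouldPrint(bool is_toplevel, const std::string& name,
                     const char* filter) {
      if (is_toplevel) {
        // Top-level code has no useful name to filter on.
        return filter[0] == '\0' || std::strcmp(filter, "*") == 0;
      }
      return name == filter;  // stand-in for PassesFilter
    }
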
+
InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
: CompilationJob(info->isolate(), info, "Ignition"),
generator_(info),
runtime_call_stats_(info->isolate()->counters()->runtime_call_stats()),
- background_execute_counter_("CompileBackgroundIgnition") {}
+ background_execute_counter_("CompileBackgroundIgnition"),
+ print_bytecode_(ShouldPrintBytecode(info->shared_info())) {}
InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
CodeGenerator::MakeCodePrologue(info(), "interpreter");
- if (FLAG_print_bytecode) {
+
+ if (print_bytecode_) {
OFStream os(stdout);
std::unique_ptr<char[]> name = info()->GetDebugName();
os << "[generating bytecode for function: " << info()->GetDebugName().get()
@@ -241,7 +269,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
return FAILED;
}
- if (FLAG_print_bytecode) {
+ if (print_bytecode_) {
OFStream os(stdout);
bytecodes->Print(os);
os << std::flush;
@@ -457,16 +485,71 @@ void Interpreter::DoMov(InterpreterAssembler* assembler) {
__ Dispatch();
}
-Node* Interpreter::BuildLoadGlobal(Callable ic, Node* context, Node* name_index,
- Node* feedback_slot,
- InterpreterAssembler* assembler) {
+void Interpreter::BuildLoadGlobal(int slot_operand_index,
+ int name_operand_index,
+ TypeofMode typeof_mode,
+ InterpreterAssembler* assembler) {
// Load the global via the LoadGlobalIC.
- Node* code_target = __ HeapConstant(ic.code());
- Node* name = __ LoadConstantPoolEntry(name_index);
- Node* smi_slot = __ SmiTag(feedback_slot);
Node* feedback_vector = __ LoadFeedbackVector();
- return __ CallStub(ic.descriptor(), code_target, context, name, smi_slot,
- feedback_vector);
+ Node* feedback_slot = __ BytecodeOperandIdx(slot_operand_index);
+
+ AccessorAssembler accessor_asm(assembler->state());
+
+ Label try_handler(assembler, Label::kDeferred),
+ miss(assembler, Label::kDeferred);
+
+ // Fast path without frame construction for the data case.
+ {
+ Label done(assembler);
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+ ExitPoint exit_point(assembler, &done, &var_result);
+
+ accessor_asm.LoadGlobalIC_TryPropertyCellCase(
+ feedback_vector, feedback_slot, &exit_point, &try_handler, &miss,
+ CodeStubAssembler::INTPTR_PARAMETERS);
+
+ __ Bind(&done);
+ __ SetAccumulator(var_result.value());
+ __ Dispatch();
+ }
+
+ // Slow path with frame construction.
+ {
+ Label done(assembler);
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+ ExitPoint exit_point(assembler, &done, &var_result);
+
+ __ Bind(&try_handler);
+ {
+ Node* context = __ GetContext();
+ Node* smi_slot = __ SmiTag(feedback_slot);
+ Node* name_index = __ BytecodeOperandIdx(name_operand_index);
+ Node* name = __ LoadConstantPoolEntry(name_index);
+
+ AccessorAssembler::LoadICParameters params(context, nullptr, name,
+ smi_slot, feedback_vector);
+ accessor_asm.LoadGlobalIC_TryHandlerCase(&params, typeof_mode,
+ &exit_point, &miss);
+ }
+
+ __ Bind(&miss);
+ {
+ Node* context = __ GetContext();
+ Node* smi_slot = __ SmiTag(feedback_slot);
+ Node* name_index = __ BytecodeOperandIdx(name_operand_index);
+ Node* name = __ LoadConstantPoolEntry(name_index);
+
+ AccessorAssembler::LoadICParameters params(context, nullptr, name,
+ smi_slot, feedback_vector);
+ accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point);
+ }
+
+ __ Bind(&done);
+ {
+ __ SetAccumulator(var_result.value());
+ __ Dispatch();
+ }
+ }
}
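
The structure of BuildLoadGlobal is a two-tier lookup: the property-cell probe runs without building a frame, and only the handler and miss cases pay for frame construction. A standalone sketch of that shape (plain C++, invented names, not V8 code):

    #include <optional>
    #include <string>

    // try_cell models the frameless fast path; slow_path models the
    // try_handler / miss cases that may build a frame or call out.
    int LoadGlobal(const std::string& name,
                   std::optional<int> (*try_cell)(const std::string&),
                   int (*slow_path)(const std::string&)) {
      if (auto hit = try_cell(name)) return *hit;  // fast data case
      return slow_path(name);
    }
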
// LdaGlobal <name_index> <slot>
@@ -474,16 +557,11 @@ Node* Interpreter::BuildLoadGlobal(Callable ic, Node* context, Node* name_index,
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedBackVector slot <slot> outside of a typeof.
void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
-
- Node* context = __ GetContext();
+ static const int kNameOperandIndex = 0;
+ static const int kSlotOperandIndex = 1;
- Node* name_index = __ BytecodeOperandIdx(0);
- Node* raw_slot = __ BytecodeOperandIdx(1);
- Node* result = BuildLoadGlobal(ic, context, name_index, raw_slot, assembler);
- __ SetAccumulator(result);
- __ Dispatch();
+ BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, NOT_INSIDE_TYPEOF,
+ assembler);
}
// LdaGlobalInsideTypeof <name_index> <slot>
@@ -491,16 +569,11 @@ void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> inside of a typeof.
void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
-
- Node* context = __ GetContext();
+ static const int kNameOperandIndex = 0;
+ static const int kSlotOperandIndex = 1;
- Node* name_index = __ BytecodeOperandIdx(0);
- Node* raw_slot = __ BytecodeOperandIdx(1);
- Node* result = BuildLoadGlobal(ic, context, name_index, raw_slot, assembler);
- __ SetAccumulator(result);
- __ Dispatch();
+ BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF,
+ assembler);
}
void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
@@ -556,6 +629,15 @@ void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
__ Dispatch();
}
+// LdaImmutableContextSlot <context> <slot_index> <depth>
+//
+// Load the object in |slot_index| of the context at |depth| in the context
+// chain starting at |context| into the accumulator.
+void Interpreter::DoLdaImmutableContextSlot(InterpreterAssembler* assembler) {
+  // TODO(danno): Share the actual code object rather than creating a duplicate one.
+ DoLdaContextSlot(assembler);
+}
+
// LdaCurrentContextSlot <slot_index>
//
// Load the object in |slot_index| of the current context into the accumulator.
@@ -567,6 +649,15 @@ void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) {
__ Dispatch();
}
+// LdaImmutableCurrentContextSlot <slot_index>
+//
+// Load the object in |slot_index| of the current context into the accumulator.
+void Interpreter::DoLdaImmutableCurrentContextSlot(
+ InterpreterAssembler* assembler) {
+  // TODO(danno): Share the actual code object rather than creating a duplicate one.
+ DoLdaCurrentContextSlot(assembler);
+}
+
// StaContextSlot <context> <slot_index> <depth>
//
// Stores the object in the accumulator into |slot_index| of the context at
@@ -670,8 +761,6 @@ void Interpreter::DoLdaLookupContextSlotInsideTypeof(
void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
InterpreterAssembler* assembler) {
Node* context = __ GetContext();
- Node* name_index = __ BytecodeOperandIdx(0);
- Node* feedback_slot = __ BytecodeOperandIdx(1);
Node* depth = __ BytecodeOperandUImm(2);
Label slowpath(assembler, Label::kDeferred);
@@ -681,19 +770,21 @@ void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
// Fast path does a normal load global
{
- Callable ic = CodeFactory::LoadGlobalICInOptimizedCode(
- isolate_, function_id == Runtime::kLoadLookupSlotInsideTypeof
- ? INSIDE_TYPEOF
- : NOT_INSIDE_TYPEOF);
- Node* result =
- BuildLoadGlobal(ic, context, name_index, feedback_slot, assembler);
- __ SetAccumulator(result);
- __ Dispatch();
+ static const int kNameOperandIndex = 0;
+ static const int kSlotOperandIndex = 1;
+
+ TypeofMode typeof_mode = function_id == Runtime::kLoadLookupSlotInsideTypeof
+ ? INSIDE_TYPEOF
+ : NOT_INSIDE_TYPEOF;
+
+ BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, typeof_mode,
+ assembler);
}
// Slow path when we have to call out to the runtime
__ Bind(&slowpath);
{
+ Node* name_index = __ BytecodeOperandIdx(0);
Node* name = __ LoadConstantPoolEntry(name_index);
Node* result = __ CallRuntime(function_id, context, name);
__ SetAccumulator(result);
@@ -825,6 +916,16 @@ void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
DoStoreIC(ic, assembler);
}
+// StaNamedOwnProperty <object> <name_index> <slot>
+//
+// Calls the StoreOwnIC at FeedbackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStaNamedOwnProperty(InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate_);
+ DoStoreIC(ic, assembler);
+}
+
void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
@@ -1047,7 +1148,7 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
lhs_is_not_string(assembler), gather_rhs_type(assembler),
update_feedback(assembler);
- __ GotoUnless(__ TaggedIsSmi(lhs), &lhs_is_not_smi);
+ __ GotoIfNot(__ TaggedIsSmi(lhs), &lhs_is_not_smi);
var_type_feedback.Bind(
__ SmiConstant(CompareOperationFeedback::kSignedSmall));
@@ -1056,7 +1157,7 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
__ Bind(&lhs_is_not_smi);
{
Node* lhs_map = __ LoadMap(lhs);
- __ GotoUnless(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number);
+ __ GotoIfNot(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number);
var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kNumber));
__ Goto(&gather_rhs_type);
@@ -1066,7 +1167,7 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
Node* lhs_instance_type = __ LoadInstanceType(lhs);
if (Token::IsOrderedRelationalCompareOp(compare_op)) {
Label lhs_is_not_oddball(assembler);
- __ GotoUnless(
+ __ GotoIfNot(
__ Word32Equal(lhs_instance_type, __ Int32Constant(ODDBALL_TYPE)),
&lhs_is_not_oddball);
@@ -1078,8 +1179,8 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
}
Label lhs_is_not_string(assembler);
- __ GotoUnless(__ IsStringInstanceType(lhs_instance_type),
- &lhs_is_not_string);
+ __ GotoIfNot(__ IsStringInstanceType(lhs_instance_type),
+ &lhs_is_not_string);
if (Token::IsOrderedRelationalCompareOp(compare_op)) {
var_type_feedback.Bind(
@@ -1096,7 +1197,15 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
__ Goto(&gather_rhs_type);
__ Bind(&lhs_is_not_string);
- var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kAny));
+ if (Token::IsEqualityOp(compare_op)) {
+ var_type_feedback.Bind(__ SelectSmiConstant(
+ __ IsJSReceiverInstanceType(lhs_instance_type),
+ CompareOperationFeedback::kReceiver,
+ CompareOperationFeedback::kAny));
+ } else {
+ var_type_feedback.Bind(
+ __ SmiConstant(CompareOperationFeedback::kAny));
+ }
__ Goto(&gather_rhs_type);
}
}
@@ -1105,7 +1214,7 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
{
Label rhs_is_not_smi(assembler), rhs_is_not_number(assembler);
- __ GotoUnless(__ TaggedIsSmi(rhs), &rhs_is_not_smi);
+ __ GotoIfNot(__ TaggedIsSmi(rhs), &rhs_is_not_smi);
var_type_feedback.Bind(
__ SmiOr(var_type_feedback.value(),
@@ -1115,7 +1224,7 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
__ Bind(&rhs_is_not_smi);
{
Node* rhs_map = __ LoadMap(rhs);
- __ GotoUnless(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number);
+ __ GotoIfNot(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number);
var_type_feedback.Bind(
__ SmiOr(var_type_feedback.value(),
@@ -1127,9 +1236,9 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
Node* rhs_instance_type = __ LoadInstanceType(rhs);
if (Token::IsOrderedRelationalCompareOp(compare_op)) {
Label rhs_is_not_oddball(assembler);
- __ GotoUnless(__ Word32Equal(rhs_instance_type,
- __ Int32Constant(ODDBALL_TYPE)),
- &rhs_is_not_oddball);
+ __ GotoIfNot(__ Word32Equal(rhs_instance_type,
+ __ Int32Constant(ODDBALL_TYPE)),
+ &rhs_is_not_oddball);
var_type_feedback.Bind(__ SmiOr(
var_type_feedback.value(),
@@ -1140,8 +1249,8 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
}
Label rhs_is_not_string(assembler);
- __ GotoUnless(__ IsStringInstanceType(rhs_instance_type),
- &rhs_is_not_string);
+ __ GotoIfNot(__ IsStringInstanceType(rhs_instance_type),
+ &rhs_is_not_string);
if (Token::IsOrderedRelationalCompareOp(compare_op)) {
var_type_feedback.Bind(
@@ -1161,8 +1270,17 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
__ Goto(&update_feedback);
__ Bind(&rhs_is_not_string);
- var_type_feedback.Bind(
- __ SmiConstant(CompareOperationFeedback::kAny));
+ if (Token::IsEqualityOp(compare_op)) {
+ var_type_feedback.Bind(
+ __ SmiOr(var_type_feedback.value(),
+ __ SelectSmiConstant(
+ __ IsJSReceiverInstanceType(rhs_instance_type),
+ CompareOperationFeedback::kReceiver,
+ CompareOperationFeedback::kAny)));
+ } else {
+ var_type_feedback.Bind(
+ __ SmiConstant(CompareOperationFeedback::kAny));
+ }
__ Goto(&update_feedback);
}
}
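
The feedback collection above forms a small lattice: each operand contributes bits, and SmiOr accumulates them monotonically toward kAny. A hedged sketch with illustrative bit values (not V8's actual CompareOperationFeedback constants):

    #include <cstdint>

    // Illustrative feedback bits; V8's actual constant values differ.
    enum CompareFeedback : uint32_t {
      kSignedSmall = 1u << 0,
      kNumber      = 1u << 1,
      kString      = 1u << 2,
      kReceiver    = 1u << 3,
      kAny         = 0xFFu,
    };

    uint32_t CombineFeedback(uint32_t lhs_bits, uint32_t rhs_bits) {
      return lhs_bits | rhs_bits;  // mirrors the __ SmiOr(...) accumulation
    }
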
@@ -2158,33 +2276,56 @@ void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// NewWithSpread <first_arg> <arg_count>
+// CallWithSpread <callable> <first_arg> <arg_count>
//
-// Call the constructor in |first_arg| with the new.target in |first_arg + 1|
-// for the |arg_count - 2| following arguments. The final argument is always a
-// spread.
+// Call a JSFunction or Callable in |callable| with the receiver in
+// |first_arg| and |arg_count - 1| arguments in subsequent registers. The
+// final argument is always a spread.
//
-void Interpreter::DoNewWithSpread(InterpreterAssembler* assembler) {
- Node* first_arg_reg = __ BytecodeOperandReg(0);
- Node* first_arg = __ RegisterLocation(first_arg_reg);
- Node* args_count = __ BytecodeOperandCount(1);
+void Interpreter::DoCallWithSpread(InterpreterAssembler* assembler) {
+ Node* callable_reg = __ BytecodeOperandReg(0);
+ Node* callable = __ LoadRegister(callable_reg);
+ Node* receiver_reg = __ BytecodeOperandReg(1);
+ Node* receiver_arg = __ RegisterLocation(receiver_reg);
+ Node* receiver_args_count = __ BytecodeOperandCount(2);
+ Node* receiver_count = __ Int32Constant(1);
+ Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
Node* context = __ GetContext();
- // Call into Runtime function NewWithSpread which does everything.
- Node* runtime_function = __ Int32Constant(Runtime::kNewWithSpread);
+  // CallJSWithSpread handles the entire spread call sequence.
Node* result =
- __ CallRuntimeN(runtime_function, context, first_arg, args_count);
+ __ CallJSWithSpread(callable, context, receiver_arg, args_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+// ConstructWithSpread <constructor> <first_arg> <arg_count>
+//
+// Call the constructor in |constructor| with the first argument in register
+// |first_arg| and |arg_count| arguments in subsequent registers. The final
+// argument is always a spread. The new.target is in the accumulator.
+//
+void Interpreter::DoConstructWithSpread(InterpreterAssembler* assembler) {
+ Node* new_target = __ GetAccumulator();
+ Node* constructor_reg = __ BytecodeOperandReg(0);
+ Node* constructor = __ LoadRegister(constructor_reg);
+ Node* first_arg_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(first_arg_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+ Node* context = __ GetContext();
+ Node* result = __ ConstructWithSpread(constructor, context, new_target,
+ first_arg, args_count);
__ SetAccumulator(result);
__ Dispatch();
}
-// New <constructor> <first_arg> <arg_count>
+// Construct <constructor> <first_arg> <arg_count>
//
-// Call operator new with |constructor| and the first argument in
+// Call the construct operator with |constructor| and the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers. The new.target is in the accumulator.
//
-void Interpreter::DoNew(InterpreterAssembler* assembler) {
+void Interpreter::DoConstruct(InterpreterAssembler* assembler) {
Node* new_target = __ GetAccumulator();
Node* constructor_reg = __ BytecodeOperandReg(0);
Node* constructor = __ LoadRegister(constructor_reg);
@@ -2194,8 +2335,8 @@ void Interpreter::DoNew(InterpreterAssembler* assembler) {
Node* slot_id = __ BytecodeOperandIdx(3);
Node* feedback_vector = __ LoadFeedbackVector();
Node* context = __ GetContext();
- Node* result = __ CallConstruct(constructor, context, new_target, first_arg,
- args_count, slot_id, feedback_vector);
+ Node* result = __ Construct(constructor, context, new_target, first_arg,
+ args_count, slot_id, feedback_vector);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -2350,7 +2491,7 @@ void Interpreter::DoTestUndefined(InterpreterAssembler* assembler) {
//
// Jump by number of bytes represented by the immediate operand |imm|.
void Interpreter::DoJump(InterpreterAssembler* assembler) {
- Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+ Node* relative_jump = __ BytecodeOperandUImmWord(0);
__ Jump(relative_jump);
}
@@ -2366,46 +2507,58 @@ void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
// JumpIfTrue <imm>
//
// Jump by number of bytes represented by an immediate operand if the
-// accumulator contains true.
+// accumulator contains true. This only works for boolean inputs, and
+// will misbehave if passed arbitrary input values.
void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+ Node* relative_jump = __ BytecodeOperandUImmWord(0);
Node* true_value = __ BooleanConstant(true);
+ CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
+ CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
// JumpIfTrueConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the accumulator contains true.
+// if the accumulator contains true. This only works for boolean inputs, and
+// will misbehave if passed arbitrary input values.
void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
Node* true_value = __ BooleanConstant(true);
+ CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
+ CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
// JumpIfFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the
-// accumulator contains false.
+// accumulator contains false. This only works for boolean inputs, and
+// will misbehave if passed arbitrary input values.
void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+ Node* relative_jump = __ BytecodeOperandUImmWord(0);
Node* false_value = __ BooleanConstant(false);
+ CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
+ CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
// JumpIfFalseConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the accumulator contains false.
+// if the accumulator contains false. This only works for boolean inputs, and
+// will misbehave if passed arbitrary input values.
void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
Node* false_value = __ BooleanConstant(false);
+ CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
+ CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
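
The new CSA_ASSERTs document why these handlers may use raw word equality: only the two canonical boolean objects can reach them. A stand-alone sketch of the same invariant, with stand-in types:

    #include <cassert>

    struct Oddball {};  // stand-in for the canonical true/false heap objects
    static const Oddball kTrueValue, kFalseValue;

    bool JumpIfTrueTaken(const Oddball* accumulator) {
      // Mirrors the CSA_ASSERTs: only canonical booleans may reach this
      // bytecode, so pointer (word) equality is sufficient.
      assert(accumulator == &kTrueValue || accumulator == &kFalseValue);
      return accumulator == &kTrueValue;
    }
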
@@ -2415,7 +2568,7 @@ void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
// referenced by the accumulator is true when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+ Node* relative_jump = __ BytecodeOperandUImmWord(0);
Label if_true(assembler), if_false(assembler);
__ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
__ Bind(&if_true);
@@ -2448,7 +2601,7 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
// referenced by the accumulator is false when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+ Node* relative_jump = __ BytecodeOperandUImmWord(0);
Label if_true(assembler), if_false(assembler);
__ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
__ Bind(&if_true);
@@ -2482,7 +2635,7 @@ void Interpreter::DoJumpIfToBooleanFalseConstant(
void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
- Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+ Node* relative_jump = __ BytecodeOperandUImmWord(0);
__ JumpIfWordEqual(accumulator, null_value, relative_jump);
}
@@ -2506,7 +2659,7 @@ void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* undefined_value =
__ HeapConstant(isolate_->factory()->undefined_value());
- Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+ Node* relative_jump = __ BytecodeOperandUImmWord(0);
__ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}
@@ -2529,7 +2682,7 @@ void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
// referenced by the accumulator is a JSReceiver.
void Interpreter::DoJumpIfJSReceiver(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+ Node* relative_jump = __ BytecodeOperandUImmWord(0);
Label if_object(assembler), if_notobject(assembler, Label::kDeferred),
if_notsmi(assembler);
@@ -2573,7 +2726,7 @@ void Interpreter::DoJumpIfJSReceiverConstant(InterpreterAssembler* assembler) {
void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
- Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+ Node* relative_jump = __ BytecodeOperandUImmWord(0);
__ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
@@ -2595,7 +2748,7 @@ void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
// performs a loop nesting check and potentially triggers OSR in case the
// current OSR level matches (or exceeds) the specified |loop_depth|.
void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
- Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+ Node* relative_jump = __ BytecodeOperandUImmWord(0);
Node* loop_depth = __ BytecodeOperandImm(1);
Node* osr_level = __ LoadOSRNestingLevel();
@@ -2606,7 +2759,7 @@ void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
__ Branch(condition, &ok, &osr_armed);
__ Bind(&ok);
- __ Jump(relative_jump);
+ __ JumpBackward(relative_jump);
__ Bind(&osr_armed);
{
@@ -2614,7 +2767,7 @@ void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
Node* target = __ HeapConstant(callable.code());
Node* context = __ GetContext();
__ CallStub(callable.descriptor(), target, context);
- __ Jump(relative_jump);
+ __ JumpBackward(relative_jump);
}
}
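
Reading the names, the operand change swaps signed immediates for unsigned ones, with the jump direction encoded in the bytecode itself (Jump forward, JumpBackward for loops). A sketch of the assumed target arithmetic:

    #include <cstdint>

    // Assumed semantics, inferred from the names: offsets are unsigned
    // words, and direction comes from the bytecode, not the operand's sign.
    uintptr_t ForwardTarget(uintptr_t pc, uintptr_t offset) { return pc + offset; }
    uintptr_t BackwardTarget(uintptr_t pc, uintptr_t offset) { return pc - offset; }
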
@@ -2654,7 +2807,6 @@ void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
__ Bind(&fast_shallow_clone);
{
- DCHECK(FLAG_allocation_site_pretenuring);
ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
Node* result = constructor_assembler.EmitFastCloneShallowArray(
closure, literal_index, context, &call_runtime, TRACK_ALLOCATION_SITE);
@@ -2738,8 +2890,8 @@ void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
Node* context = __ GetContext();
Label call_runtime(assembler, Label::kDeferred);
- __ GotoUnless(__ IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
- &call_runtime);
+ __ GotoIfNot(__ IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
+ &call_runtime);
ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
Node* vector_index = __ BytecodeOperandIdx(1);
vector_index = __ SmiTag(vector_index);
@@ -2865,10 +3017,9 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
__ Bind(&if_not_duplicate_parameters);
{
- // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
- Node* target = __ HeapConstant(callable.code());
- Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+ ArgumentsBuiltinsAssembler constructor_assembler(assembler->state());
+ Node* result =
+ constructor_assembler.EmitFastNewSloppyArguments(context, closure);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -2886,12 +3037,11 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
//
// Creates a new unmapped arguments object.
void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
- // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
- Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
- Node* target = __ HeapConstant(callable.code());
Node* context = __ GetContext();
Node* closure = __ LoadRegister(Register::function_closure());
- Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+ ArgumentsBuiltinsAssembler builtins_assembler(assembler->state());
+ Node* result =
+ builtins_assembler.EmitFastNewStrictArguments(context, closure);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -2900,12 +3050,10 @@ void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
//
// Creates a new rest parameter array.
void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
- // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
- Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
- Node* target = __ HeapConstant(callable.code());
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
- Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+ ArgumentsBuiltinsAssembler builtins_assembler(assembler->state());
+ Node* result = builtins_assembler.EmitFastNewRestParameter(context, closure);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -2982,7 +3130,7 @@ void Interpreter::DoReturn(InterpreterAssembler* assembler) {
// Call runtime to handle debugger statement.
void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
Node* context = __ GetContext();
- __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
+ __ CallStub(CodeFactory::HandleDebuggerStatement(isolate_), context);
__ Dispatch();
}
@@ -2995,6 +3143,7 @@ void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator(); \
Node* original_handler = \
__ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
+ __ MaybeDropFrames(context); \
__ DispatchToBytecodeHandler(original_handler); \
}
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
@@ -3020,68 +3169,42 @@ void Interpreter::BuildForInPrepareResult(Node* output_register,
// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
// and cache_length respectively.
void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
- Node* object_reg = __ BytecodeOperandReg(0);
- Node* receiver = __ LoadRegister(object_reg);
+ Node* object_register = __ BytecodeOperandReg(0);
+ Node* output_register = __ BytecodeOperandReg(1);
+ Node* receiver = __ LoadRegister(object_register);
Node* context = __ GetContext();
- Node* const zero_smi = __ SmiConstant(Smi::kZero);
- Label nothing_to_iterate(assembler, Label::kDeferred),
- use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
+ Node* cache_type;
+ Node* cache_array;
+ Node* cache_length;
+ Label call_runtime(assembler, Label::kDeferred),
+ nothing_to_iterate(assembler, Label::kDeferred);
- if (FLAG_debug_code) {
- Label already_receiver(assembler), abort(assembler);
- Node* instance_type = __ LoadInstanceType(receiver);
- __ Branch(__ IsJSReceiverInstanceType(instance_type), &already_receiver,
- &abort);
- __ Bind(&abort);
- {
- __ Abort(kExpectedJSReceiver);
- // TODO(klaasb) remove this unreachable Goto once Abort ends the block
- __ Goto(&already_receiver);
- }
- __ Bind(&already_receiver);
- }
-
- __ CheckEnumCache(receiver, &use_enum_cache, &use_runtime);
+ ObjectBuiltinsAssembler object_assembler(assembler->state());
+ std::tie(cache_type, cache_array, cache_length) =
+ object_assembler.EmitForInPrepare(receiver, context, &call_runtime,
+ &nothing_to_iterate);
- __ Bind(&use_enum_cache);
- {
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- Node* cache_type = __ LoadMap(receiver);
- Node* cache_length = __ EnumLength(cache_type);
- __ GotoIf(assembler->WordEqual(cache_length, zero_smi),
- &nothing_to_iterate);
- Node* descriptors = __ LoadMapDescriptors(cache_type);
- Node* cache_offset =
- __ LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
- Node* cache_array = __ LoadObjectField(
- cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
- Node* output_register = __ BytecodeOperandReg(1);
- BuildForInPrepareResult(output_register, cache_type, cache_array,
- cache_length, assembler);
- __ Dispatch();
- }
+ BuildForInPrepareResult(output_register, cache_type, cache_array,
+ cache_length, assembler);
+ __ Dispatch();
- __ Bind(&use_runtime);
+ __ Bind(&call_runtime);
{
Node* result_triple =
__ CallRuntime(Runtime::kForInPrepare, context, receiver);
Node* cache_type = __ Projection(0, result_triple);
Node* cache_array = __ Projection(1, result_triple);
Node* cache_length = __ Projection(2, result_triple);
- Node* output_register = __ BytecodeOperandReg(1);
BuildForInPrepareResult(output_register, cache_type, cache_array,
cache_length, assembler);
__ Dispatch();
}
-
__ Bind(&nothing_to_iterate);
{
// Receiver is null or undefined or descriptors are zero length.
- Node* output_register = __ BytecodeOperandReg(1);
- BuildForInPrepareResult(output_register, zero_smi, zero_smi, zero_smi,
- assembler);
+ Node* zero = __ SmiConstant(0);
+ BuildForInPrepareResult(output_register, zero, zero, zero, assembler);
__ Dispatch();
}
}
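
The rewritten handler receives the (cache_type, cache_array, cache_length) triple through std::tie rather than computing it inline. A minimal sketch of that handoff, with stand-in types:

    #include <tuple>

    using Node = void*;  // stand-in for compiler::Node*

    std::tuple<Node, Node, Node> EmitForInPrepareStub() {
      return std::make_tuple(nullptr, nullptr, nullptr);
    }

    void Caller() {
      Node cache_type, cache_array, cache_length;
      // Unpack the triple into pre-declared locals, as the handler does.
      std::tie(cache_type, cache_array, cache_length) = EmitForInPrepareStub();
    }
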
@@ -3211,8 +3334,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
ExternalReference::debug_last_step_action_address(isolate_));
Node* step_action = __ Load(MachineType::Int8(), step_action_address);
STATIC_ASSERT(StepIn > StepNext);
- STATIC_ASSERT(StepFrame > StepNext);
- STATIC_ASSERT(LastStepAction == StepFrame);
+ STATIC_ASSERT(LastStepAction == StepIn);
Node* step_next = __ Int32Constant(StepNext);
__ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
__ Bind(&ok);
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 04f7e85b39..ac36815f14 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -148,10 +148,8 @@ class Interpreter {
InterpreterAssembler* assembler);
// Generates code to load a global.
- compiler::Node* BuildLoadGlobal(Callable ic, compiler::Node* context,
- compiler::Node* name_index,
- compiler::Node* feedback_slot,
- InterpreterAssembler* assembler);
+ void BuildLoadGlobal(int slot_operand_index, int name_operand_index,
+ TypeofMode typeof_mode, InterpreterAssembler* assembler);
// Generates code to prepare the result for ForInPrepare. Cache data
// are placed into the consecutive series of registers starting at
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index fc88676823..02993cfa6b 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -133,28 +133,23 @@ bool Isolate::IsArraySpeciesLookupChainIntact() {
Smi::cast(species_cell->value())->value() == kProtectorValid;
}
-bool Isolate::IsHasInstanceLookupChainIntact() {
- PropertyCell* has_instance_cell = heap()->has_instance_protector();
- return has_instance_cell->value() == Smi::FromInt(kProtectorValid);
-}
-
bool Isolate::IsStringLengthOverflowIntact() {
- PropertyCell* has_instance_cell = heap()->string_length_protector();
- return has_instance_cell->value() == Smi::FromInt(kProtectorValid);
+ PropertyCell* string_length_cell = heap()->string_length_protector();
+ return string_length_cell->value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsFastArrayIterationIntact() {
- Cell* fast_iteration = heap()->fast_array_iteration_protector();
- return fast_iteration->value() == Smi::FromInt(kProtectorValid);
+ Cell* fast_iteration_cell = heap()->fast_array_iteration_protector();
+ return fast_iteration_cell->value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsArrayBufferNeuteringIntact() {
- PropertyCell* fast_iteration = heap()->array_buffer_neutering_protector();
- return fast_iteration->value() == Smi::FromInt(kProtectorValid);
+ PropertyCell* buffer_neutering = heap()->array_buffer_neutering_protector();
+ return buffer_neutering->value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsArrayIteratorLookupChainIntact() {
- Cell* array_iterator_cell = heap()->array_iterator_protector();
+ PropertyCell* array_iterator_cell = heap()->array_iterator_protector();
return array_iterator_cell->value() == Smi::FromInt(kProtectorValid);
}
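
All of these accessors share one protector-cell pattern: a cell holds kProtectorValid until the guarded assumption is broken, after which every fast path that consults it bails out. A stand-alone sketch (constant values assumed, Cell is a stand-in):

    constexpr int kProtectorValid = 1;    // assumed value
    constexpr int kProtectorInvalid = 0;  // assumed value

    struct Cell { int value = kProtectorValid; };

    bool IsIntact(const Cell& cell) { return cell.value == kProtectorValid; }
    void Invalidate(Cell& cell) { cell.value = kProtectorInvalid; }
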
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index c0018fe40e..bac61301c6 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -9,6 +9,7 @@
#include <fstream> // NOLINT(readability/streams)
#include <sstream>
+#include "src/assembler-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/context-slot-cache.h"
#include "src/base/hashmap.h"
@@ -1460,6 +1461,9 @@ void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
DCHECK(scheduled_exception() != heap()->termination_exception());
clear_scheduled_exception();
}
+ if (thread_local_top_.pending_message_obj_ == handler->message_obj_) {
+ clear_pending_message();
+ }
}
@@ -1875,6 +1879,11 @@ bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
Handle<JSReceiver>::cast(deferred_promise));
}
+ if (queue->IsSymbol()) {
+ return InternalPromiseHasUserDefinedRejectHandler(
+ isolate, Handle<JSPromise>::cast(deferred_promise));
+ }
+
Handle<FixedArray> queue_arr = Handle<FixedArray>::cast(queue);
Handle<FixedArray> deferred_promise_arr =
Handle<FixedArray>::cast(deferred_promise);
@@ -2022,6 +2031,47 @@ Isolate::ThreadDataTable::~ThreadDataTable() {
// DCHECK_NULL(list_);
}
+void Isolate::ReleaseManagedObjects() {
+ Isolate::ManagedObjectFinalizer* current =
+ managed_object_finalizers_list_.next_;
+ while (current != nullptr) {
+ Isolate::ManagedObjectFinalizer* next = current->next_;
+ current->Dispose();
+ delete current;
+ current = next;
+ }
+}
+
+Isolate::ManagedObjectFinalizer* Isolate::RegisterForReleaseAtTeardown(
+ void* value, Isolate::ManagedObjectFinalizer::Deleter deleter) {
+ DCHECK_NOT_NULL(value);
+ DCHECK_NOT_NULL(deleter);
+
+ Isolate::ManagedObjectFinalizer* ret = new Isolate::ManagedObjectFinalizer();
+ ret->value_ = value;
+ ret->deleter_ = deleter;
+  // Insert at the head. The head is a sentinel owned by the Isolate and
+  // alive for its whole lifetime; without it we could not relink the
+  // list when an entry is deleted before the isolate goes away.
+ Isolate::ManagedObjectFinalizer* next = managed_object_finalizers_list_.next_;
+ managed_object_finalizers_list_.next_ = ret;
+ ret->prev_ = &managed_object_finalizers_list_;
+ ret->next_ = next;
+ if (next != nullptr) next->prev_ = ret;
+ return ret;
+}
+
+void Isolate::UnregisterFromReleaseAtTeardown(
+ Isolate::ManagedObjectFinalizer** finalizer_ptr) {
+ DCHECK_NOT_NULL(finalizer_ptr);
+ Isolate::ManagedObjectFinalizer* finalizer = *finalizer_ptr;
+ DCHECK_NOT_NULL(finalizer->prev_);
+
+ finalizer->prev_->next_ = finalizer->next_;
+ if (finalizer->next_ != nullptr) finalizer->next_->prev_ = finalizer->prev_;
+ delete finalizer;
+ *finalizer_ptr = nullptr;
+}
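
The finalizer list above is a sentinel-headed intrusive doubly-linked list: because the head node is embedded in the Isolate, removal never special-cases the first element or a null head. A minimal sketch of the same linking logic:

    struct Node {
      Node* prev = nullptr;
      Node* next = nullptr;
    };

    void InsertAfterHead(Node* head, Node* node) {
      node->prev = head;
      node->next = head->next;
      if (head->next != nullptr) head->next->prev = node;
      head->next = node;
    }

    void Remove(Node* node) {
      node->prev->next = node->next;  // prev is never null: head is a sentinel
      if (node->next != nullptr) node->next->prev = node->prev;
    }
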
Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
#if defined(USE_SIMULATOR)
@@ -2128,8 +2178,7 @@ class VerboseAccountingAllocator : public AccountingAllocator {
"\"time\": %f, "
"\"ptr\": \"%p\", "
"\"name\": \"%s\","
- "\"nesting\": %zu"
- "}\n",
+ "\"nesting\": %" PRIuS "}\n",
reinterpret_cast<void*>(heap_->isolate()), time,
reinterpret_cast<const void*>(zone), zone->name(),
nesting_deepth_.Value());
@@ -2146,9 +2195,9 @@ class VerboseAccountingAllocator : public AccountingAllocator {
"\"time\": %f, "
"\"ptr\": \"%p\", "
"\"name\": \"%s\", "
- "\"size\": %zu,"
- "\"nesting\": %zu"
- "}\n",
+ "\"size\": %" PRIuS
+ ","
+ "\"nesting\": %" PRIuS "}\n",
reinterpret_cast<void*>(heap_->isolate()), time,
reinterpret_cast<const void*>(zone), zone->name(),
zone->allocation_size(), nesting_deepth_.Value());
@@ -2164,9 +2213,9 @@ class VerboseAccountingAllocator : public AccountingAllocator {
"\"type\": \"zone\", "
"\"isolate\": \"%p\", "
"\"time\": %f, "
- "\"allocated\": %zu,"
- "\"pooled\": %zu"
- "}\n",
+ "\"allocated\": %" PRIuS
+ ","
+ "\"pooled\": %" PRIuS "}\n",
reinterpret_cast<void*>(heap_->isolate()), time, malloced, pooled);
}
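
These hunks replace %zu with V8's PRIuS macro so size_t prints portably on toolchains without C99 format support. A self-contained sketch; the fallback definition here is an assumption, not V8's actual macro:

    #include <cstddef>
    #include <cstdio>

    #ifndef PRIuS
    #define PRIuS "zu"  // assumed fallback, for the sketch only
    #endif

    void LogZoneSize(size_t allocated, size_t pooled) {
      printf("{\"allocated\": %" PRIuS ", \"pooled\": %" PRIuS "}\n",
             allocated, pooled);
    }
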
@@ -2215,6 +2264,7 @@ Isolate::Isolate(bool enable_serializer)
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
rail_mode_(PERFORMANCE_ANIMATION),
+ promise_hook_or_debug_is_active_(false),
promise_hook_(NULL),
load_start_time_ms_(0),
serializer_enabled_(enable_serializer),
@@ -2237,7 +2287,8 @@ Isolate::Isolate(bool enable_serializer)
use_counter_callback_(NULL),
basic_block_profiler_(NULL),
cancelable_task_manager_(new CancelableTaskManager()),
- abort_on_uncaught_exception_callback_(NULL) {
+ abort_on_uncaught_exception_callback_(NULL),
+ total_regexp_code_generated_(0) {
{
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
CHECK(thread_data_table_);
@@ -2390,6 +2441,7 @@ void Isolate::Deinit() {
root_index_map_ = NULL;
ClearSerializerData();
+ ReleaseManagedObjects();
}
@@ -2643,9 +2695,7 @@ bool Isolate::Init(Deserializer* des) {
bootstrapper_->Initialize(create_heap_objects);
builtins_.SetUp(this, create_heap_objects);
- if (create_heap_objects) {
- heap_.CreateFixedStubs();
- }
+ if (create_heap_objects) heap_.CreateFixedStubs();
if (FLAG_log_internal_timer_events) {
set_event_logger(Logger::DefaultEventLoggerSentinel);
@@ -2899,10 +2949,9 @@ Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
return nullptr;
}
-
-bool Isolate::use_crankshaft() const {
+bool Isolate::use_crankshaft() {
return FLAG_opt && FLAG_crankshaft && !serializer_enabled_ &&
- CpuFeatures::SupportsCrankshaft();
+ CpuFeatures::SupportsCrankshaft() && !IsCodeCoverageEnabled();
}
bool Isolate::NeedsSourcePositionsForProfiling() const {
@@ -2911,6 +2960,15 @@ bool Isolate::NeedsSourcePositionsForProfiling() const {
debug_->is_active() || logger_->is_logging();
}
+bool Isolate::IsCodeCoverageEnabled() {
+ return heap()->code_coverage_list()->IsArrayList();
+}
+
+void Isolate::SetCodeCoverageList(Object* value) {
+ DCHECK(value->IsUndefined(this) || value->IsArrayList());
+ heap()->set_code_coverage_list(value);
+}
+
bool Isolate::IsArrayOrObjectPrototype(Object* object) {
Object* context = heap()->native_contexts_list();
while (!context->IsUndefined(this)) {
@@ -3058,15 +3116,6 @@ void Isolate::UpdateArrayProtectorOnSetElement(Handle<JSObject> object) {
handle(Smi::FromInt(kProtectorInvalid), this));
}
-void Isolate::InvalidateHasInstanceProtector() {
- DCHECK(factory()->has_instance_protector()->value()->IsSmi());
- DCHECK(IsHasInstanceLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- factory()->has_instance_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsHasInstanceLookupChainIntact());
-}
-
void Isolate::InvalidateIsConcatSpreadableProtector() {
DCHECK(factory()->is_concat_spreadable_protector()->value()->IsSmi());
DCHECK(IsIsConcatSpreadableLookupChainIntact());
@@ -3094,8 +3143,9 @@ void Isolate::InvalidateStringLengthOverflowProtector() {
void Isolate::InvalidateArrayIteratorProtector() {
DCHECK(factory()->array_iterator_protector()->value()->IsSmi());
DCHECK(IsArrayIteratorLookupChainIntact());
- factory()->array_iterator_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
+ PropertyCell::SetValueWithInvalidation(
+ factory()->array_iterator_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArrayIteratorLookupChainIntact());
}
@@ -3244,10 +3294,18 @@ void Isolate::FireCallCompletedCallback() {
}
}
-void Isolate::SetPromiseHook(PromiseHook hook) { promise_hook_ = hook; }
+void Isolate::DebugStateUpdated() {
+ promise_hook_or_debug_is_active_ = promise_hook_ || debug()->is_active();
+}
+
+void Isolate::SetPromiseHook(PromiseHook hook) {
+ promise_hook_ = hook;
+ DebugStateUpdated();
+}
void Isolate::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
Handle<Object> parent) {
+ if (debug()->is_active()) debug()->RunPromiseHook(type, promise, parent);
if (promise_hook_ == nullptr) return;
promise_hook_(type, v8::Utils::PromiseToLocal(promise),
v8::Utils::ToLocal(parent));
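
The new promise_hook_or_debug_is_active_ byte caches the disjunction of two conditions so generated code can test a single flag; it must be recomputed whenever either input changes. A sketch with stand-in fields:

    struct IsolateSketch {
      bool has_promise_hook = false;
      bool debug_is_active = false;
      bool promise_hook_or_debug_is_active = false;  // the cached byte

      void DebugStateUpdated() {
        promise_hook_or_debug_is_active = has_promise_hook || debug_is_active;
      }
      void SetPromiseHook(bool hook_installed) {
        has_promise_hook = hook_installed;
        DebugStateUpdated();  // keep the cache coherent
      }
    };
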
@@ -3271,33 +3329,9 @@ void Isolate::ReportPromiseReject(Handle<JSObject> promise,
v8::Utils::StackTraceToLocal(stack_trace)));
}
-namespace {
-class PromiseDebugEventScope {
- public:
- PromiseDebugEventScope(Isolate* isolate, int id)
- : isolate_(isolate), id_(id) {
- if (isolate_->debug()->is_active() && id_ != kDebugPromiseNoID) {
- isolate_->debug()->OnAsyncTaskEvent(debug::kDebugWillHandle, id_);
- }
- }
-
- ~PromiseDebugEventScope() {
- if (isolate_->debug()->is_active() && id_ != kDebugPromiseNoID) {
- isolate_->debug()->OnAsyncTaskEvent(debug::kDebugDidHandle, id_);
- }
- }
-
- private:
- Isolate* isolate_;
- int id_;
-};
-} // namespace
-
void Isolate::PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
MaybeHandle<Object>* result,
MaybeHandle<Object>* maybe_exception) {
- PromiseDebugEventScope helper(this, info->debug_id());
-
Handle<Object> value(info->value(), this);
Handle<Object> tasks(info->tasks(), this);
Handle<JSFunction> promise_handle_fn = promise_handle();
@@ -3339,8 +3373,6 @@ void Isolate::PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
void Isolate::PromiseResolveThenableJob(
Handle<PromiseResolveThenableJobInfo> info, MaybeHandle<Object>* result,
MaybeHandle<Object>* maybe_exception) {
- PromiseDebugEventScope helper(this, info->debug_id());
-
Handle<JSReceiver> thenable(info->thenable(), this);
Handle<JSFunction> resolve(info->resolve(), this);
Handle<JSFunction> reject(info->reject(), this);
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 6bbc0fc343..444d99feb8 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -533,6 +533,8 @@ class Isolate {
// for legacy API reasons.
void TearDown();
+ void ReleaseManagedObjects();
+
static void GlobalTearDown();
void ClearSerializerData();
@@ -914,6 +916,11 @@ class Isolate {
RegExpStack* regexp_stack() { return regexp_stack_; }
+ size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
+ void IncreaseTotalRegexpCodeGenerated(int size) {
+ total_regexp_code_generated_ += size;
+ }
+
List<int>* regexp_indices() { return &regexp_indices_; }
unibrow::Mapping<unibrow::Ecma262Canonicalize>*
@@ -962,12 +969,15 @@ class Isolate {
bool IsDead() { return has_fatal_error_; }
void SignalFatalError() { has_fatal_error_ = true; }
- bool use_crankshaft() const;
+ bool use_crankshaft();
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
bool NeedsSourcePositionsForProfiling() const;
+ bool IsCodeCoverageEnabled();
+ void SetCodeCoverageList(Object* value);
+
double time_millis_since_init() {
return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
}
@@ -990,7 +1000,6 @@ class Isolate {
bool IsFastArrayConstructorPrototypeChainIntact();
inline bool IsArraySpeciesLookupChainIntact();
- inline bool IsHasInstanceLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
inline bool IsStringLengthOverflowIntact();
@@ -1017,7 +1026,6 @@ class Isolate {
UpdateArrayProtectorOnSetElement(object);
}
void InvalidateArraySpeciesProtector();
- void InvalidateHasInstanceProtector();
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
void InvalidateArrayIteratorProtector();
@@ -1124,9 +1132,12 @@ class Isolate {
int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif
- Address promise_hook_address() {
- return reinterpret_cast<Address>(&promise_hook_);
+ Address promise_hook_or_debug_is_active_address() {
+ return reinterpret_cast<Address>(&promise_hook_or_debug_is_active_);
}
+
+ void DebugStateUpdated();
+
void SetPromiseHook(PromiseHook hook);
void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
Handle<Object> parent);
@@ -1158,7 +1169,7 @@ class Isolate {
return cancelable_task_manager_;
}
- AstStringConstants* ast_string_constants() const {
+ const AstStringConstants* ast_string_constants() const {
return ast_string_constants_;
}
@@ -1196,6 +1207,42 @@ class Isolate {
base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
#endif
+ void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
+ bool allow_atomics_wait() { return allow_atomics_wait_; }
+
+ // List of native heap values allocated by the runtime as part of its
+ // implementation that must be freed at isolate deinit.
+ class ManagedObjectFinalizer final {
+ public:
+ typedef void (*Deleter)(void*);
+ void Dispose() { deleter_(value_); }
+
+ private:
+ friend class Isolate;
+
+ ManagedObjectFinalizer() {
+ DCHECK_EQ(reinterpret_cast<void*>(this),
+ reinterpret_cast<void*>(&value_));
+ }
+
+ // value_ must be the first member
+ void* value_ = nullptr;
+ Deleter deleter_ = nullptr;
+ ManagedObjectFinalizer* prev_ = nullptr;
+ ManagedObjectFinalizer* next_ = nullptr;
+ };
+
+ // Register a native value for destruction at isolate teardown.
+ ManagedObjectFinalizer* RegisterForReleaseAtTeardown(
+ void* value, ManagedObjectFinalizer::Deleter deleter);
+
+ // Unregister a previously registered value from release at
+ // isolate teardown, deleting the ManagedObjectFinalizer.
+  // Responsibility for deleting the managed value passes back to the
+  // caller. The finalizer is passed by pointer because *finalizer_ptr
+  // is reset to nullptr.
+ void UnregisterFromReleaseAtTeardown(ManagedObjectFinalizer** finalizer_ptr);
+
protected:
explicit Isolate(bool enable_serializer);
bool IsArrayOrObjectPrototype(Object* object);
@@ -1369,6 +1416,7 @@ class Isolate {
AccessCompilerData* access_compiler_data_;
base::RandomNumberGenerator* random_number_generator_;
base::AtomicValue<RAILMode> rail_mode_;
+ bool promise_hook_or_debug_is_active_;
PromiseHook promise_hook_;
base::Mutex rail_mutex_;
double load_start_time_ms_;
@@ -1404,7 +1452,7 @@ class Isolate {
std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
FunctionEntryHook function_entry_hook_;
- AstStringConstants* ast_string_constants_;
+ const AstStringConstants* ast_string_constants_;
interpreter::Interpreter* interpreter_;
@@ -1474,6 +1522,12 @@ class Isolate {
base::Mutex simulator_i_cache_mutex_;
#endif
+ bool allow_atomics_wait_;
+
+ ManagedObjectFinalizer managed_object_finalizers_list_;
+
+ size_t total_regexp_code_generated_;
+
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class HeapTester;
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index fca75a3f65..88e8cb3ea7 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -1270,7 +1270,14 @@ function ArrayReduceRight(callback, current) {
}
-function InnerArrayCopyWithin(target, start, end, array, length) {
+// ES#sec-array.prototype.copywithin
+// Array.prototype.copyWithin ( target, start [ , end ] )
+function ArrayCopyWithin(target, start, end) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
+
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH(array.length);
+
target = TO_INTEGER(target);
var to;
if (target < 0) {
@@ -1318,17 +1325,6 @@ function InnerArrayCopyWithin(target, start, end, array, length) {
}
-// ES6 draft 03-17-15, section 22.1.3.3
-function ArrayCopyWithin(target, start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
-
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
-
- return InnerArrayCopyWithin(target, start, end, array, length);
-}
-
-
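
The inlined normalization follows the ES spec's relative-index rule, which copyWithin applies to target, start, and end alike: negative values count back from the end, then clamp to [0, length]. A sketch of that rule:

    #include <algorithm>
    #include <cstdint>

    // Relative-index rule per the ES spec (integer conversion assumed done).
    int64_t RelativeIndex(int64_t index, int64_t length) {
      return index < 0 ? std::max<int64_t>(length + index, 0)
                       : std::min<int64_t>(index, length);
    }
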
function InnerArrayFind(predicate, thisArg, array, length) {
if (!IS_CALLABLE(predicate)) {
throw %make_type_error(kCalledNonCallable, predicate);
@@ -1532,7 +1528,14 @@ function getFunction(name, jsBuiltin, len) {
return f;
};
-var ArrayValues = getFunction("values", null, 0);
+// Array prototype functions exposed to the public API via
+// Template::SetIntrinsicDataProperty(); most of them return iterators.
+var IteratorFunctions = {
+ "entries": getFunction("entries", null, 0),
+ "forEach": getFunction("forEach", ArrayForEach, 1),
+ "keys": getFunction("keys", null, 0),
+ "values": getFunction("values", null, 0)
+}
// Set up non-enumerable functions of the Array.prototype object and
// set their names.
@@ -1551,7 +1554,6 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"splice", getFunction("splice", ArraySplice, 2),
"sort", getFunction("sort", ArraySort),
"filter", getFunction("filter", ArrayFilter, 1),
- "forEach", getFunction("forEach", ArrayForEach, 1),
"some", getFunction("some", ArraySome, 1),
"every", getFunction("every", ArrayEvery, 1),
"map", getFunction("map", ArrayMap, 1),
@@ -1564,12 +1566,18 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"findIndex", getFunction("findIndex", ArrayFindIndex, 1),
"fill", getFunction("fill", ArrayFill, 1),
"includes", getFunction("includes", null, 1),
- "keys", getFunction("keys", null, 0),
- "entries", getFunction("entries", null, 0),
- iteratorSymbol, ArrayValues
+ "entries", IteratorFunctions.entries,
+ "forEach", IteratorFunctions.forEach,
+ "keys", IteratorFunctions.keys,
+ iteratorSymbol, IteratorFunctions.values
]);
-%FunctionSetName(ArrayValues, "values");
+utils.ForEachFunction = GlobalArray.prototype.forEach;
+
+%FunctionSetName(IteratorFunctions.entries, "entries");
+%FunctionSetName(IteratorFunctions.forEach, "forEach");
+%FunctionSetName(IteratorFunctions.keys, "keys");
+%FunctionSetName(IteratorFunctions.values, "values");
%FinishArrayPrototypeSetup(GlobalArray.prototype);
@@ -1612,8 +1620,7 @@ utils.Export(function(to) {
to.ArrayJoin = ArrayJoin;
to.ArrayPush = ArrayPush;
to.ArrayToString = ArrayToString;
- to.ArrayValues = ArrayValues;
- to.InnerArrayCopyWithin = InnerArrayCopyWithin;
+  to.ArrayValues = IteratorFunctions.values;
to.InnerArrayEvery = InnerArrayEvery;
to.InnerArrayFill = InnerArrayFill;
to.InnerArrayFilter = InnerArrayFilter;
@@ -1631,13 +1638,16 @@ utils.Export(function(to) {
});
%InstallToContext([
+ "array_entries_iterator", IteratorFunctions.entries,
+ "array_for_each_iterator", IteratorFunctions.forEach,
+ "array_keys_iterator", IteratorFunctions.keys,
"array_pop", ArrayPop,
"array_push", ArrayPush,
"array_shift", ArrayShift,
"array_splice", ArraySplice,
"array_slice", ArraySlice,
"array_unshift", ArrayUnshift,
- "array_values_iterator", ArrayValues,
+ "array_values_iterator", IteratorFunctions.values,
]);
});
diff --git a/deps/v8/src/js/async-await.js b/deps/v8/src/js/async-await.js
deleted file mode 100644
index f0104ed9ac..0000000000
--- a/deps/v8/src/js/async-await.js
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils, extrasUtils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var AsyncFunctionNext;
-var AsyncFunctionThrow;
-
-utils.Import(function(from) {
- AsyncFunctionNext = from.AsyncFunctionNext;
- AsyncFunctionThrow = from.AsyncFunctionThrow;
-});
-
-var promiseHandledBySymbol =
- utils.ImportNow("promise_handled_by_symbol");
-var promiseForwardingHandlerSymbol =
- utils.ImportNow("promise_forwarding_handler_symbol");
-
-// -------------------------------------------------------------------
-
-function PromiseCastResolved(value) {
- // TODO(caitp): This is non spec compliant. See v8:5694.
- if (%is_promise(value)) {
- return value;
- } else {
- var promise = %promise_internal_constructor(UNDEFINED);
- %promise_resolve(promise, value);
- return promise;
- }
-}
-
-// ES#abstract-ops-async-function-await
-// AsyncFunctionAwait ( value )
-// Shared logic for the core of await. The parser desugars
-// await awaited
-// into
-// yield AsyncFunctionAwait{Caught,Uncaught}(.generator, awaited, .promise)
-// The 'awaited' parameter is the value; the generator stands in
-// for the asyncContext, and .promise is the larger promise under
-// construction by the enclosing async function.
-function AsyncFunctionAwait(generator, awaited, outerPromise) {
- // Promise.resolve(awaited).then(
- // value => AsyncFunctionNext(value),
- // error => AsyncFunctionThrow(error)
- // );
- var promise = PromiseCastResolved(awaited);
-
- var onFulfilled = sentValue => {
- %_Call(AsyncFunctionNext, generator, sentValue);
- // The resulting Promise is a throwaway, so it doesn't matter what it
- // resolves to. What is important is that we don't end up keeping the
- // whole chain of intermediate Promises alive by returning the value
- // of AsyncFunctionNext, as that would create a memory leak.
- return;
- };
- var onRejected = sentError => {
- %_Call(AsyncFunctionThrow, generator, sentError);
- // Similarly, returning the huge Promise here would cause a long
- // resolution chain to find what the exception to throw is, and
- // create a similar memory leak, and it does not matter what
- // sort of rejection this intermediate Promise becomes.
- return;
- }
-
- var throwawayPromise = %promise_internal_constructor(promise);
-
- // The Promise will be thrown away and not handled, but it shouldn't trigger
- // unhandled reject events as its work is done
- %PromiseMarkAsHandled(throwawayPromise);
-
- if (DEBUG_IS_ACTIVE) {
- if (%is_promise(awaited)) {
- // Mark the reject handler callback to be a forwarding edge, rather
- // than a meaningful catch handler
- SET_PRIVATE(onRejected, promiseForwardingHandlerSymbol, true);
- }
-
- // Mark the dependency to outerPromise in case the throwaway Promise is
- // found on the Promise stack
- SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, outerPromise);
- }
-
- %perform_promise_then(promise, onFulfilled, onRejected, throwawayPromise);
-}
-
-// Called by the parser from the desugaring of 'await' when catch
-// prediction indicates no locally surrounding catch block
-function AsyncFunctionAwaitUncaught(generator, awaited, outerPromise) {
- AsyncFunctionAwait(generator, awaited, outerPromise);
-}
-
-// Called by the parser from the desugaring of 'await' when catch
-// prediction indicates that there is a locally surrounding catch block
-function AsyncFunctionAwaitCaught(generator, awaited, outerPromise) {
- if (DEBUG_IS_ACTIVE && %is_promise(awaited)) {
- %PromiseMarkHandledHint(awaited);
- }
- AsyncFunctionAwait(generator, awaited, outerPromise);
-}
-
-// How the parser rejects promises from async/await desugaring
-function RejectPromiseNoDebugEvent(promise, reason) {
- return %promise_internal_reject(promise, reason, false);
-}
-
-function AsyncFunctionPromiseCreate() {
- var promise = %promise_internal_constructor(UNDEFINED);
- if (DEBUG_IS_ACTIVE) {
- // Push the Promise under construction in an async function on
- // the catch prediction stack to handle exceptions thrown before
- // the first await.
- // Assign ID and create a recurring task to save stack for future
- // resumptions from await.
- %DebugAsyncFunctionPromiseCreated(promise);
- }
- return promise;
-}
-
-function AsyncFunctionPromiseRelease(promise) {
- if (DEBUG_IS_ACTIVE) {
- // Pop the Promise under construction in an async function on
- // from catch prediction stack.
- %DebugPopPromise();
- }
-}
-
-%InstallToContext([
- "async_function_await_caught", AsyncFunctionAwaitCaught,
- "async_function_await_uncaught", AsyncFunctionAwaitUncaught,
- "reject_promise_no_debug_event", RejectPromiseNoDebugEvent,
- "async_function_promise_create", AsyncFunctionPromiseCreate,
- "async_function_promise_release", AsyncFunctionPromiseRelease,
-]);
-
-})
diff --git a/deps/v8/src/js/datetime-format-to-parts.js b/deps/v8/src/js/datetime-format-to-parts.js
deleted file mode 100644
index 3194f50672..0000000000
--- a/deps/v8/src/js/datetime-format-to-parts.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalIntl = global.Intl;
-var FormatDateToParts = utils.ImportNow("FormatDateToParts");
-
-utils.InstallFunctions(GlobalIntl.DateTimeFormat.prototype, DONT_ENUM, [
- 'formatToParts', FormatDateToParts
-]);
-})
diff --git a/deps/v8/src/js/harmony-atomics.js b/deps/v8/src/js/harmony-atomics.js
index bfbf0c505e..daeba3f64f 100644
--- a/deps/v8/src/js/harmony-atomics.js
+++ b/deps/v8/src/js/harmony-atomics.js
@@ -13,10 +13,12 @@
var GlobalObject = global.Object;
var MaxSimple;
+var MinSimple;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
});
// -------------------------------------------------------------------
@@ -101,7 +103,7 @@ function AtomicsExchangeJS(ia, index, value) {
}
function AtomicsIsLockFreeJS(size) {
- return %_AtomicsIsLockFree(size);
+ return %_AtomicsIsLockFree(TO_INTEGER(size));
}
function AtomicsWaitJS(ia, index, value, timeout) {
@@ -123,7 +125,12 @@ function AtomicsWaitJS(ia, index, value, timeout) {
function AtomicsWakeJS(ia, index, count) {
CheckSharedInteger32TypedArray(ia);
index = ValidateIndex(index, %_TypedArrayGetLength(ia));
- count = MaxSimple(0, TO_INTEGER(count));
+ if (IS_UNDEFINED(count)) {
+ count = kMaxUint32;
+ } else {
+ // Clamp to [0, kMaxUint32].
+ count = MinSimple(MaxSimple(0, TO_INTEGER(count)), kMaxUint32);
+ }
return %AtomicsWake(ia, index, count);
}
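
The count handling above in plain terms: an undefined count wakes all waiters; anything else is converted to an integer and clamped to [0, kMaxUint32]. A stand-alone sketch:

    #include <algorithm>
    #include <cstdint>

    uint32_t ClampWakeCount(bool count_is_undefined, double count) {
      if (count_is_undefined) return UINT32_MAX;  // wake every waiter
      double clamped = std::min(std::max(count, 0.0),
                                static_cast<double>(UINT32_MAX));
      return static_cast<uint32_t>(clamped);
    }
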
diff --git a/deps/v8/src/js/harmony-simd.js b/deps/v8/src/js/harmony-simd.js
deleted file mode 100644
index 0880b5bdf1..0000000000
--- a/deps/v8/src/js/harmony-simd.js
+++ /dev/null
@@ -1,923 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalSIMD = global.SIMD;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-// -------------------------------------------------------------------
-
-macro SIMD_FLOAT_TYPES(FUNCTION)
-FUNCTION(Float32x4, float32x4, 4)
-endmacro
-
-macro SIMD_INT_TYPES(FUNCTION)
-FUNCTION(Int32x4, int32x4, 4)
-FUNCTION(Int16x8, int16x8, 8)
-FUNCTION(Int8x16, int8x16, 16)
-endmacro
-
-macro SIMD_UINT_TYPES(FUNCTION)
-FUNCTION(Uint32x4, uint32x4, 4)
-FUNCTION(Uint16x8, uint16x8, 8)
-FUNCTION(Uint8x16, uint8x16, 16)
-endmacro
-
-macro SIMD_BOOL_TYPES(FUNCTION)
-FUNCTION(Bool32x4, bool32x4, 4)
-FUNCTION(Bool16x8, bool16x8, 8)
-FUNCTION(Bool8x16, bool8x16, 16)
-endmacro
-
-macro SIMD_ALL_TYPES(FUNCTION)
-SIMD_FLOAT_TYPES(FUNCTION)
-SIMD_INT_TYPES(FUNCTION)
-SIMD_UINT_TYPES(FUNCTION)
-SIMD_BOOL_TYPES(FUNCTION)
-endmacro
-
-macro DECLARE_GLOBALS(NAME, TYPE, LANES)
-var GlobalNAME = GlobalSIMD.NAME;
-endmacro
-
-SIMD_ALL_TYPES(DECLARE_GLOBALS)
-
-macro DECLARE_COMMON_FUNCTIONS(NAME, TYPE, LANES)
-function NAMECheckJS(a) {
- return %NAMECheck(a);
-}
-
-function NAMEToString() {
- var value = %ValueOf(this);
- if (typeof(value) !== 'TYPE') {
- throw %make_type_error(kIncompatibleMethodReceiver,
- "NAME.prototype.toString", this);
- }
- var str = "SIMD.NAME(";
- str += %NAMEExtractLane(value, 0);
- for (var i = 1; i < LANES; i++) {
- str += ", " + %NAMEExtractLane(value, i);
- }
- return str + ")";
-}
-
-function NAMEToLocaleString() {
- var value = %ValueOf(this);
- if (typeof(value) !== 'TYPE') {
- throw %make_type_error(kIncompatibleMethodReceiver,
- "NAME.prototype.toLocaleString", this);
- }
- var str = "SIMD.NAME(";
- str += %NAMEExtractLane(value, 0).toLocaleString();
- for (var i = 1; i < LANES; i++) {
- str += ", " + %NAMEExtractLane(value, i).toLocaleString();
- }
- return str + ")";
-}
-
-function NAMEValueOf() {
- var value = %ValueOf(this);
- if (typeof(value) !== 'TYPE') {
- throw %make_type_error(kIncompatibleMethodReceiver,
- "NAME.prototype.valueOf", this);
- }
- return value;
-}
-
-function NAMEExtractLaneJS(instance, lane) {
- return %NAMEExtractLane(instance, lane);
-}
-endmacro
-
-SIMD_ALL_TYPES(DECLARE_COMMON_FUNCTIONS)
-
-macro DECLARE_SHIFT_FUNCTIONS(NAME, TYPE, LANES)
-function NAMEShiftLeftByScalarJS(instance, shift) {
- return %NAMEShiftLeftByScalar(instance, shift);
-}
-
-function NAMEShiftRightByScalarJS(instance, shift) {
- return %NAMEShiftRightByScalar(instance, shift);
-}
-endmacro
-
-SIMD_INT_TYPES(DECLARE_SHIFT_FUNCTIONS)
-SIMD_UINT_TYPES(DECLARE_SHIFT_FUNCTIONS)
-
-macro SIMD_SMALL_INT_TYPES(FUNCTION)
-FUNCTION(Int16x8)
-FUNCTION(Int8x16)
-FUNCTION(Uint8x16)
-FUNCTION(Uint16x8)
-endmacro
-
-macro DECLARE_SMALL_INT_FUNCTIONS(NAME)
-function NAMEAddSaturateJS(a, b) {
- return %NAMEAddSaturate(a, b);
-}
-
-function NAMESubSaturateJS(a, b) {
- return %NAMESubSaturate(a, b);
-}
-endmacro
-
-SIMD_SMALL_INT_TYPES(DECLARE_SMALL_INT_FUNCTIONS)
-
-macro DECLARE_SIGNED_FUNCTIONS(NAME, TYPE, LANES)
-function NAMENegJS(a) {
- return %NAMENeg(a);
-}
-endmacro
-
-SIMD_FLOAT_TYPES(DECLARE_SIGNED_FUNCTIONS)
-SIMD_INT_TYPES(DECLARE_SIGNED_FUNCTIONS)
-
-macro DECLARE_BOOL_FUNCTIONS(NAME, TYPE, LANES)
-function NAMEReplaceLaneJS(instance, lane, value) {
- return %NAMEReplaceLane(instance, lane, value);
-}
-
-function NAMEAnyTrueJS(s) {
- return %NAMEAnyTrue(s);
-}
-
-function NAMEAllTrueJS(s) {
- return %NAMEAllTrue(s);
-}
-endmacro
-
-SIMD_BOOL_TYPES(DECLARE_BOOL_FUNCTIONS)
-
-macro SIMD_NUMERIC_TYPES(FUNCTION)
-SIMD_FLOAT_TYPES(FUNCTION)
-SIMD_INT_TYPES(FUNCTION)
-SIMD_UINT_TYPES(FUNCTION)
-endmacro
-
-macro DECLARE_NUMERIC_FUNCTIONS(NAME, TYPE, LANES)
-function NAMEReplaceLaneJS(instance, lane, value) {
- return %NAMEReplaceLane(instance, lane, TO_NUMBER(value));
-}
-
-function NAMESelectJS(selector, a, b) {
- return %NAMESelect(selector, a, b);
-}
-
-function NAMEAddJS(a, b) {
- return %NAMEAdd(a, b);
-}
-
-function NAMESubJS(a, b) {
- return %NAMESub(a, b);
-}
-
-function NAMEMulJS(a, b) {
- return %NAMEMul(a, b);
-}
-
-function NAMEMinJS(a, b) {
- return %NAMEMin(a, b);
-}
-
-function NAMEMaxJS(a, b) {
- return %NAMEMax(a, b);
-}
-
-function NAMEEqualJS(a, b) {
- return %NAMEEqual(a, b);
-}
-
-function NAMENotEqualJS(a, b) {
- return %NAMENotEqual(a, b);
-}
-
-function NAMELessThanJS(a, b) {
- return %NAMELessThan(a, b);
-}
-
-function NAMELessThanOrEqualJS(a, b) {
- return %NAMELessThanOrEqual(a, b);
-}
-
-function NAMEGreaterThanJS(a, b) {
- return %NAMEGreaterThan(a, b);
-}
-
-function NAMEGreaterThanOrEqualJS(a, b) {
- return %NAMEGreaterThanOrEqual(a, b);
-}
-
-function NAMELoadJS(tarray, index) {
- return %NAMELoad(tarray, index);
-}
-
-function NAMEStoreJS(tarray, index, a) {
- return %NAMEStore(tarray, index, a);
-}
-endmacro
-
-SIMD_NUMERIC_TYPES(DECLARE_NUMERIC_FUNCTIONS)
-
-macro SIMD_LOGICAL_TYPES(FUNCTION)
-SIMD_INT_TYPES(FUNCTION)
-SIMD_UINT_TYPES(FUNCTION)
-SIMD_BOOL_TYPES(FUNCTION)
-endmacro
-
-macro DECLARE_LOGICAL_FUNCTIONS(NAME, TYPE, LANES)
-function NAMEAndJS(a, b) {
- return %NAMEAnd(a, b);
-}
-
-function NAMEOrJS(a, b) {
- return %NAMEOr(a, b);
-}
-
-function NAMEXorJS(a, b) {
- return %NAMEXor(a, b);
-}
-
-function NAMENotJS(a) {
- return %NAMENot(a);
-}
-endmacro
-
-SIMD_LOGICAL_TYPES(DECLARE_LOGICAL_FUNCTIONS)
-
-macro SIMD_FROM_TYPES(FUNCTION)
-FUNCTION(Float32x4, Int32x4)
-FUNCTION(Float32x4, Uint32x4)
-FUNCTION(Int32x4, Float32x4)
-FUNCTION(Int32x4, Uint32x4)
-FUNCTION(Uint32x4, Float32x4)
-FUNCTION(Uint32x4, Int32x4)
-FUNCTION(Int16x8, Uint16x8)
-FUNCTION(Uint16x8, Int16x8)
-FUNCTION(Int8x16, Uint8x16)
-FUNCTION(Uint8x16, Int8x16)
-endmacro
-
-macro DECLARE_FROM_FUNCTIONS(TO, FROM)
-function TOFromFROMJS(a) {
- return %TOFromFROM(a);
-}
-endmacro
-
-SIMD_FROM_TYPES(DECLARE_FROM_FUNCTIONS)
-
-macro SIMD_FROM_BITS_TYPES(FUNCTION)
-FUNCTION(Float32x4, Int32x4)
-FUNCTION(Float32x4, Uint32x4)
-FUNCTION(Float32x4, Int16x8)
-FUNCTION(Float32x4, Uint16x8)
-FUNCTION(Float32x4, Int8x16)
-FUNCTION(Float32x4, Uint8x16)
-FUNCTION(Int32x4, Float32x4)
-FUNCTION(Int32x4, Uint32x4)
-FUNCTION(Int32x4, Int16x8)
-FUNCTION(Int32x4, Uint16x8)
-FUNCTION(Int32x4, Int8x16)
-FUNCTION(Int32x4, Uint8x16)
-FUNCTION(Uint32x4, Float32x4)
-FUNCTION(Uint32x4, Int32x4)
-FUNCTION(Uint32x4, Int16x8)
-FUNCTION(Uint32x4, Uint16x8)
-FUNCTION(Uint32x4, Int8x16)
-FUNCTION(Uint32x4, Uint8x16)
-FUNCTION(Int16x8, Float32x4)
-FUNCTION(Int16x8, Int32x4)
-FUNCTION(Int16x8, Uint32x4)
-FUNCTION(Int16x8, Uint16x8)
-FUNCTION(Int16x8, Int8x16)
-FUNCTION(Int16x8, Uint8x16)
-FUNCTION(Uint16x8, Float32x4)
-FUNCTION(Uint16x8, Int32x4)
-FUNCTION(Uint16x8, Uint32x4)
-FUNCTION(Uint16x8, Int16x8)
-FUNCTION(Uint16x8, Int8x16)
-FUNCTION(Uint16x8, Uint8x16)
-FUNCTION(Int8x16, Float32x4)
-FUNCTION(Int8x16, Int32x4)
-FUNCTION(Int8x16, Uint32x4)
-FUNCTION(Int8x16, Int16x8)
-FUNCTION(Int8x16, Uint16x8)
-FUNCTION(Int8x16, Uint8x16)
-FUNCTION(Uint8x16, Float32x4)
-FUNCTION(Uint8x16, Int32x4)
-FUNCTION(Uint8x16, Uint32x4)
-FUNCTION(Uint8x16, Int16x8)
-FUNCTION(Uint8x16, Uint16x8)
-FUNCTION(Uint8x16, Int8x16)
-endmacro
-
-macro DECLARE_FROM_BITS_FUNCTIONS(TO, FROM)
-function TOFromFROMBitsJS(a) {
- return %TOFromFROMBits(a);
-}
-endmacro
-
-SIMD_FROM_BITS_TYPES(DECLARE_FROM_BITS_FUNCTIONS)
-
-
-macro SIMD_LOADN_STOREN_TYPES(FUNCTION)
-FUNCTION(Float32x4, 1)
-FUNCTION(Float32x4, 2)
-FUNCTION(Float32x4, 3)
-FUNCTION(Int32x4, 1)
-FUNCTION(Int32x4, 2)
-FUNCTION(Int32x4, 3)
-FUNCTION(Uint32x4, 1)
-FUNCTION(Uint32x4, 2)
-FUNCTION(Uint32x4, 3)
-endmacro
-
-macro DECLARE_LOADN_STOREN_FUNCTIONS(NAME, COUNT)
-function NAMELoadCOUNTJS(tarray, index) {
- return %NAMELoadCOUNT(tarray, index);
-}
-
-function NAMEStoreCOUNTJS(tarray, index, a) {
- return %NAMEStoreCOUNT(tarray, index, a);
-}
-endmacro
-
-SIMD_LOADN_STOREN_TYPES(DECLARE_LOADN_STOREN_FUNCTIONS)
-
-//-------------------------------------------------------------------
-
-macro SIMD_X4_TYPES(FUNCTION)
-FUNCTION(Float32x4)
-FUNCTION(Int32x4)
-FUNCTION(Uint32x4)
-FUNCTION(Bool32x4)
-endmacro
-
-macro DECLARE_X4_FUNCTIONS(NAME)
-function NAMESplat(s) {
- return %CreateNAME(s, s, s, s);
-}
-
-function NAMESwizzleJS(a, c0, c1, c2, c3) {
- return %NAMESwizzle(a, c0, c1, c2, c3);
-}
-
-function NAMEShuffleJS(a, b, c0, c1, c2, c3) {
- return %NAMEShuffle(a, b, c0, c1, c2, c3);
-}
-endmacro
-
-SIMD_X4_TYPES(DECLARE_X4_FUNCTIONS)
-
-macro SIMD_X8_TYPES(FUNCTION)
-FUNCTION(Int16x8)
-FUNCTION(Uint16x8)
-FUNCTION(Bool16x8)
-endmacro
-
-macro DECLARE_X8_FUNCTIONS(NAME)
-function NAMESplat(s) {
- return %CreateNAME(s, s, s, s, s, s, s, s);
-}
-
-function NAMESwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7) {
- return %NAMESwizzle(a, c0, c1, c2, c3, c4, c5, c6, c7);
-}
-
-function NAMEShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7) {
- return %NAMEShuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7);
-}
-endmacro
-
-SIMD_X8_TYPES(DECLARE_X8_FUNCTIONS)
-
-macro SIMD_X16_TYPES(FUNCTION)
-FUNCTION(Int8x16)
-FUNCTION(Uint8x16)
-FUNCTION(Bool8x16)
-endmacro
-
-macro DECLARE_X16_FUNCTIONS(NAME)
-function NAMESplat(s) {
- return %CreateNAME(s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s);
-}
-
-function NAMESwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
- c12, c13, c14, c15) {
- return %NAMESwizzle(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
- c12, c13, c14, c15);
-}
-
-function NAMEShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
- c11, c12, c13, c14, c15) {
- return %NAMEShuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
- c11, c12, c13, c14, c15);
-}
-endmacro
-
-SIMD_X16_TYPES(DECLARE_X16_FUNCTIONS)
-
-//-------------------------------------------------------------------
-
-function Float32x4Constructor(c0, c1, c2, c3) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kNotConstructor, "Float32x4");
- }
- return %CreateFloat32x4(TO_NUMBER(c0), TO_NUMBER(c1),
- TO_NUMBER(c2), TO_NUMBER(c3));
-}
-
-
-function Int32x4Constructor(c0, c1, c2, c3) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kNotConstructor, "Int32x4");
- }
- return %CreateInt32x4(TO_NUMBER(c0), TO_NUMBER(c1),
- TO_NUMBER(c2), TO_NUMBER(c3));
-}
-
-
-function Uint32x4Constructor(c0, c1, c2, c3) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kNotConstructor, "Uint32x4");
- }
- return %CreateUint32x4(TO_NUMBER(c0), TO_NUMBER(c1),
- TO_NUMBER(c2), TO_NUMBER(c3));
-}
-
-
-function Bool32x4Constructor(c0, c1, c2, c3) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kNotConstructor, "Bool32x4");
- }
- return %CreateBool32x4(c0, c1, c2, c3);
-}
-
-
-function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kNotConstructor, "Int16x8");
- }
- return %CreateInt16x8(TO_NUMBER(c0), TO_NUMBER(c1),
- TO_NUMBER(c2), TO_NUMBER(c3),
- TO_NUMBER(c4), TO_NUMBER(c5),
- TO_NUMBER(c6), TO_NUMBER(c7));
-}
-
-
-function Uint16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kNotConstructor, "Uint16x8");
- }
- return %CreateUint16x8(TO_NUMBER(c0), TO_NUMBER(c1),
- TO_NUMBER(c2), TO_NUMBER(c3),
- TO_NUMBER(c4), TO_NUMBER(c5),
- TO_NUMBER(c6), TO_NUMBER(c7));
-}
-
-
-function Bool16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kNotConstructor, "Bool16x8");
- }
- return %CreateBool16x8(c0, c1, c2, c3, c4, c5, c6, c7);
-}
-
-
-function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
- c12, c13, c14, c15) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kNotConstructor, "Int8x16");
- }
- return %CreateInt8x16(TO_NUMBER(c0), TO_NUMBER(c1),
- TO_NUMBER(c2), TO_NUMBER(c3),
- TO_NUMBER(c4), TO_NUMBER(c5),
- TO_NUMBER(c6), TO_NUMBER(c7),
- TO_NUMBER(c8), TO_NUMBER(c9),
- TO_NUMBER(c10), TO_NUMBER(c11),
- TO_NUMBER(c12), TO_NUMBER(c13),
- TO_NUMBER(c14), TO_NUMBER(c15));
-}
-
-
-function Uint8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
- c12, c13, c14, c15) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kNotConstructor, "Uint8x16");
- }
- return %CreateUint8x16(TO_NUMBER(c0), TO_NUMBER(c1),
- TO_NUMBER(c2), TO_NUMBER(c3),
- TO_NUMBER(c4), TO_NUMBER(c5),
- TO_NUMBER(c6), TO_NUMBER(c7),
- TO_NUMBER(c8), TO_NUMBER(c9),
- TO_NUMBER(c10), TO_NUMBER(c11),
- TO_NUMBER(c12), TO_NUMBER(c13),
- TO_NUMBER(c14), TO_NUMBER(c15));
-}
-
-
-function Bool8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
- c12, c13, c14, c15) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kNotConstructor, "Bool8x16");
- }
- return %CreateBool8x16(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,
- c13, c14, c15);
-}
-
-
-function Float32x4AbsJS(a) {
- return %Float32x4Abs(a);
-}
-
-
-function Float32x4SqrtJS(a) {
- return %Float32x4Sqrt(a);
-}
-
-
-function Float32x4RecipApproxJS(a) {
- return %Float32x4RecipApprox(a);
-}
-
-
-function Float32x4RecipSqrtApproxJS(a) {
- return %Float32x4RecipSqrtApprox(a);
-}
-
-
-function Float32x4DivJS(a, b) {
- return %Float32x4Div(a, b);
-}
-
-
-function Float32x4MinNumJS(a, b) {
- return %Float32x4MinNum(a, b);
-}
-
-
-function Float32x4MaxNumJS(a, b) {
- return %Float32x4MaxNum(a, b);
-}
-
-
-%AddNamedProperty(GlobalSIMD, toStringTagSymbol, 'SIMD', READ_ONLY | DONT_ENUM);
-
-macro SETUP_SIMD_TYPE(NAME, TYPE, LANES)
-%SetCode(GlobalNAME, NAMEConstructor);
-%FunctionSetPrototype(GlobalNAME, {});
-%AddNamedProperty(GlobalNAME.prototype, 'constructor', GlobalNAME,
- DONT_ENUM);
-%AddNamedProperty(GlobalNAME.prototype, toStringTagSymbol, 'NAME',
- DONT_ENUM | READ_ONLY);
-utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
- 'toLocaleString', NAMEToLocaleString,
- 'toString', NAMEToString,
- 'valueOf', NAMEValueOf,
-]);
-endmacro
-
-SIMD_ALL_TYPES(SETUP_SIMD_TYPE)
-
-//-------------------------------------------------------------------
-
-utils.InstallFunctions(GlobalFloat32x4, DONT_ENUM, [
- 'splat', Float32x4Splat,
- 'check', Float32x4CheckJS,
- 'extractLane', Float32x4ExtractLaneJS,
- 'replaceLane', Float32x4ReplaceLaneJS,
- 'neg', Float32x4NegJS,
- 'abs', Float32x4AbsJS,
- 'sqrt', Float32x4SqrtJS,
- 'reciprocalApproximation', Float32x4RecipApproxJS,
- 'reciprocalSqrtApproximation', Float32x4RecipSqrtApproxJS,
- 'add', Float32x4AddJS,
- 'sub', Float32x4SubJS,
- 'mul', Float32x4MulJS,
- 'div', Float32x4DivJS,
- 'min', Float32x4MinJS,
- 'max', Float32x4MaxJS,
- 'minNum', Float32x4MinNumJS,
- 'maxNum', Float32x4MaxNumJS,
- 'lessThan', Float32x4LessThanJS,
- 'lessThanOrEqual', Float32x4LessThanOrEqualJS,
- 'greaterThan', Float32x4GreaterThanJS,
- 'greaterThanOrEqual', Float32x4GreaterThanOrEqualJS,
- 'equal', Float32x4EqualJS,
- 'notEqual', Float32x4NotEqualJS,
- 'select', Float32x4SelectJS,
- 'swizzle', Float32x4SwizzleJS,
- 'shuffle', Float32x4ShuffleJS,
- 'fromInt32x4', Float32x4FromInt32x4JS,
- 'fromUint32x4', Float32x4FromUint32x4JS,
- 'fromInt32x4Bits', Float32x4FromInt32x4BitsJS,
- 'fromUint32x4Bits', Float32x4FromUint32x4BitsJS,
- 'fromInt16x8Bits', Float32x4FromInt16x8BitsJS,
- 'fromUint16x8Bits', Float32x4FromUint16x8BitsJS,
- 'fromInt8x16Bits', Float32x4FromInt8x16BitsJS,
- 'fromUint8x16Bits', Float32x4FromUint8x16BitsJS,
- 'load', Float32x4LoadJS,
- 'load1', Float32x4Load1JS,
- 'load2', Float32x4Load2JS,
- 'load3', Float32x4Load3JS,
- 'store', Float32x4StoreJS,
- 'store1', Float32x4Store1JS,
- 'store2', Float32x4Store2JS,
- 'store3', Float32x4Store3JS,
-]);
-
-utils.InstallFunctions(GlobalInt32x4, DONT_ENUM, [
- 'splat', Int32x4Splat,
- 'check', Int32x4CheckJS,
- 'extractLane', Int32x4ExtractLaneJS,
- 'replaceLane', Int32x4ReplaceLaneJS,
- 'neg', Int32x4NegJS,
- 'add', Int32x4AddJS,
- 'sub', Int32x4SubJS,
- 'mul', Int32x4MulJS,
- 'min', Int32x4MinJS,
- 'max', Int32x4MaxJS,
- 'and', Int32x4AndJS,
- 'or', Int32x4OrJS,
- 'xor', Int32x4XorJS,
- 'not', Int32x4NotJS,
- 'shiftLeftByScalar', Int32x4ShiftLeftByScalarJS,
- 'shiftRightByScalar', Int32x4ShiftRightByScalarJS,
- 'lessThan', Int32x4LessThanJS,
- 'lessThanOrEqual', Int32x4LessThanOrEqualJS,
- 'greaterThan', Int32x4GreaterThanJS,
- 'greaterThanOrEqual', Int32x4GreaterThanOrEqualJS,
- 'equal', Int32x4EqualJS,
- 'notEqual', Int32x4NotEqualJS,
- 'select', Int32x4SelectJS,
- 'swizzle', Int32x4SwizzleJS,
- 'shuffle', Int32x4ShuffleJS,
- 'fromFloat32x4', Int32x4FromFloat32x4JS,
- 'fromUint32x4', Int32x4FromUint32x4JS,
- 'fromFloat32x4Bits', Int32x4FromFloat32x4BitsJS,
- 'fromUint32x4Bits', Int32x4FromUint32x4BitsJS,
- 'fromInt16x8Bits', Int32x4FromInt16x8BitsJS,
- 'fromUint16x8Bits', Int32x4FromUint16x8BitsJS,
- 'fromInt8x16Bits', Int32x4FromInt8x16BitsJS,
- 'fromUint8x16Bits', Int32x4FromUint8x16BitsJS,
- 'load', Int32x4LoadJS,
- 'load1', Int32x4Load1JS,
- 'load2', Int32x4Load2JS,
- 'load3', Int32x4Load3JS,
- 'store', Int32x4StoreJS,
- 'store1', Int32x4Store1JS,
- 'store2', Int32x4Store2JS,
- 'store3', Int32x4Store3JS,
-]);
-
-utils.InstallFunctions(GlobalUint32x4, DONT_ENUM, [
- 'splat', Uint32x4Splat,
- 'check', Uint32x4CheckJS,
- 'extractLane', Uint32x4ExtractLaneJS,
- 'replaceLane', Uint32x4ReplaceLaneJS,
- 'add', Uint32x4AddJS,
- 'sub', Uint32x4SubJS,
- 'mul', Uint32x4MulJS,
- 'min', Uint32x4MinJS,
- 'max', Uint32x4MaxJS,
- 'and', Uint32x4AndJS,
- 'or', Uint32x4OrJS,
- 'xor', Uint32x4XorJS,
- 'not', Uint32x4NotJS,
- 'shiftLeftByScalar', Uint32x4ShiftLeftByScalarJS,
- 'shiftRightByScalar', Uint32x4ShiftRightByScalarJS,
- 'lessThan', Uint32x4LessThanJS,
- 'lessThanOrEqual', Uint32x4LessThanOrEqualJS,
- 'greaterThan', Uint32x4GreaterThanJS,
- 'greaterThanOrEqual', Uint32x4GreaterThanOrEqualJS,
- 'equal', Uint32x4EqualJS,
- 'notEqual', Uint32x4NotEqualJS,
- 'select', Uint32x4SelectJS,
- 'swizzle', Uint32x4SwizzleJS,
- 'shuffle', Uint32x4ShuffleJS,
- 'fromFloat32x4', Uint32x4FromFloat32x4JS,
- 'fromInt32x4', Uint32x4FromInt32x4JS,
- 'fromFloat32x4Bits', Uint32x4FromFloat32x4BitsJS,
- 'fromInt32x4Bits', Uint32x4FromInt32x4BitsJS,
- 'fromInt16x8Bits', Uint32x4FromInt16x8BitsJS,
- 'fromUint16x8Bits', Uint32x4FromUint16x8BitsJS,
- 'fromInt8x16Bits', Uint32x4FromInt8x16BitsJS,
- 'fromUint8x16Bits', Uint32x4FromUint8x16BitsJS,
- 'load', Uint32x4LoadJS,
- 'load1', Uint32x4Load1JS,
- 'load2', Uint32x4Load2JS,
- 'load3', Uint32x4Load3JS,
- 'store', Uint32x4StoreJS,
- 'store1', Uint32x4Store1JS,
- 'store2', Uint32x4Store2JS,
- 'store3', Uint32x4Store3JS,
-]);
-
-utils.InstallFunctions(GlobalBool32x4, DONT_ENUM, [
- 'splat', Bool32x4Splat,
- 'check', Bool32x4CheckJS,
- 'extractLane', Bool32x4ExtractLaneJS,
- 'replaceLane', Bool32x4ReplaceLaneJS,
- 'and', Bool32x4AndJS,
- 'or', Bool32x4OrJS,
- 'xor', Bool32x4XorJS,
- 'not', Bool32x4NotJS,
- 'anyTrue', Bool32x4AnyTrueJS,
- 'allTrue', Bool32x4AllTrueJS,
- 'swizzle', Bool32x4SwizzleJS,
- 'shuffle', Bool32x4ShuffleJS,
-]);
-
-utils.InstallFunctions(GlobalInt16x8, DONT_ENUM, [
- 'splat', Int16x8Splat,
- 'check', Int16x8CheckJS,
- 'extractLane', Int16x8ExtractLaneJS,
- 'replaceLane', Int16x8ReplaceLaneJS,
- 'neg', Int16x8NegJS,
- 'add', Int16x8AddJS,
- 'sub', Int16x8SubJS,
- 'addSaturate', Int16x8AddSaturateJS,
- 'subSaturate', Int16x8SubSaturateJS,
- 'mul', Int16x8MulJS,
- 'min', Int16x8MinJS,
- 'max', Int16x8MaxJS,
- 'and', Int16x8AndJS,
- 'or', Int16x8OrJS,
- 'xor', Int16x8XorJS,
- 'not', Int16x8NotJS,
- 'shiftLeftByScalar', Int16x8ShiftLeftByScalarJS,
- 'shiftRightByScalar', Int16x8ShiftRightByScalarJS,
- 'lessThan', Int16x8LessThanJS,
- 'lessThanOrEqual', Int16x8LessThanOrEqualJS,
- 'greaterThan', Int16x8GreaterThanJS,
- 'greaterThanOrEqual', Int16x8GreaterThanOrEqualJS,
- 'equal', Int16x8EqualJS,
- 'notEqual', Int16x8NotEqualJS,
- 'select', Int16x8SelectJS,
- 'swizzle', Int16x8SwizzleJS,
- 'shuffle', Int16x8ShuffleJS,
- 'fromUint16x8', Int16x8FromUint16x8JS,
- 'fromFloat32x4Bits', Int16x8FromFloat32x4BitsJS,
- 'fromInt32x4Bits', Int16x8FromInt32x4BitsJS,
- 'fromUint32x4Bits', Int16x8FromUint32x4BitsJS,
- 'fromUint16x8Bits', Int16x8FromUint16x8BitsJS,
- 'fromInt8x16Bits', Int16x8FromInt8x16BitsJS,
- 'fromUint8x16Bits', Int16x8FromUint8x16BitsJS,
- 'load', Int16x8LoadJS,
- 'store', Int16x8StoreJS,
-]);
-
-utils.InstallFunctions(GlobalUint16x8, DONT_ENUM, [
- 'splat', Uint16x8Splat,
- 'check', Uint16x8CheckJS,
- 'extractLane', Uint16x8ExtractLaneJS,
- 'replaceLane', Uint16x8ReplaceLaneJS,
- 'add', Uint16x8AddJS,
- 'sub', Uint16x8SubJS,
- 'addSaturate', Uint16x8AddSaturateJS,
- 'subSaturate', Uint16x8SubSaturateJS,
- 'mul', Uint16x8MulJS,
- 'min', Uint16x8MinJS,
- 'max', Uint16x8MaxJS,
- 'and', Uint16x8AndJS,
- 'or', Uint16x8OrJS,
- 'xor', Uint16x8XorJS,
- 'not', Uint16x8NotJS,
- 'shiftLeftByScalar', Uint16x8ShiftLeftByScalarJS,
- 'shiftRightByScalar', Uint16x8ShiftRightByScalarJS,
- 'lessThan', Uint16x8LessThanJS,
- 'lessThanOrEqual', Uint16x8LessThanOrEqualJS,
- 'greaterThan', Uint16x8GreaterThanJS,
- 'greaterThanOrEqual', Uint16x8GreaterThanOrEqualJS,
- 'equal', Uint16x8EqualJS,
- 'notEqual', Uint16x8NotEqualJS,
- 'select', Uint16x8SelectJS,
- 'swizzle', Uint16x8SwizzleJS,
- 'shuffle', Uint16x8ShuffleJS,
- 'fromInt16x8', Uint16x8FromInt16x8JS,
- 'fromFloat32x4Bits', Uint16x8FromFloat32x4BitsJS,
- 'fromInt32x4Bits', Uint16x8FromInt32x4BitsJS,
- 'fromUint32x4Bits', Uint16x8FromUint32x4BitsJS,
- 'fromInt16x8Bits', Uint16x8FromInt16x8BitsJS,
- 'fromInt8x16Bits', Uint16x8FromInt8x16BitsJS,
- 'fromUint8x16Bits', Uint16x8FromUint8x16BitsJS,
- 'load', Uint16x8LoadJS,
- 'store', Uint16x8StoreJS,
-]);
-
-utils.InstallFunctions(GlobalBool16x8, DONT_ENUM, [
- 'splat', Bool16x8Splat,
- 'check', Bool16x8CheckJS,
- 'extractLane', Bool16x8ExtractLaneJS,
- 'replaceLane', Bool16x8ReplaceLaneJS,
- 'and', Bool16x8AndJS,
- 'or', Bool16x8OrJS,
- 'xor', Bool16x8XorJS,
- 'not', Bool16x8NotJS,
- 'anyTrue', Bool16x8AnyTrueJS,
- 'allTrue', Bool16x8AllTrueJS,
- 'swizzle', Bool16x8SwizzleJS,
- 'shuffle', Bool16x8ShuffleJS,
-]);
-
-utils.InstallFunctions(GlobalInt8x16, DONT_ENUM, [
- 'splat', Int8x16Splat,
- 'check', Int8x16CheckJS,
- 'extractLane', Int8x16ExtractLaneJS,
- 'replaceLane', Int8x16ReplaceLaneJS,
- 'neg', Int8x16NegJS,
- 'add', Int8x16AddJS,
- 'sub', Int8x16SubJS,
- 'addSaturate', Int8x16AddSaturateJS,
- 'subSaturate', Int8x16SubSaturateJS,
- 'mul', Int8x16MulJS,
- 'min', Int8x16MinJS,
- 'max', Int8x16MaxJS,
- 'and', Int8x16AndJS,
- 'or', Int8x16OrJS,
- 'xor', Int8x16XorJS,
- 'not', Int8x16NotJS,
- 'shiftLeftByScalar', Int8x16ShiftLeftByScalarJS,
- 'shiftRightByScalar', Int8x16ShiftRightByScalarJS,
- 'lessThan', Int8x16LessThanJS,
- 'lessThanOrEqual', Int8x16LessThanOrEqualJS,
- 'greaterThan', Int8x16GreaterThanJS,
- 'greaterThanOrEqual', Int8x16GreaterThanOrEqualJS,
- 'equal', Int8x16EqualJS,
- 'notEqual', Int8x16NotEqualJS,
- 'select', Int8x16SelectJS,
- 'swizzle', Int8x16SwizzleJS,
- 'shuffle', Int8x16ShuffleJS,
- 'fromUint8x16', Int8x16FromUint8x16JS,
- 'fromFloat32x4Bits', Int8x16FromFloat32x4BitsJS,
- 'fromInt32x4Bits', Int8x16FromInt32x4BitsJS,
- 'fromUint32x4Bits', Int8x16FromUint32x4BitsJS,
- 'fromInt16x8Bits', Int8x16FromInt16x8BitsJS,
- 'fromUint16x8Bits', Int8x16FromUint16x8BitsJS,
- 'fromUint8x16Bits', Int8x16FromUint8x16BitsJS,
- 'load', Int8x16LoadJS,
- 'store', Int8x16StoreJS,
-]);
-
-utils.InstallFunctions(GlobalUint8x16, DONT_ENUM, [
- 'splat', Uint8x16Splat,
- 'check', Uint8x16CheckJS,
- 'extractLane', Uint8x16ExtractLaneJS,
- 'replaceLane', Uint8x16ReplaceLaneJS,
- 'add', Uint8x16AddJS,
- 'sub', Uint8x16SubJS,
- 'addSaturate', Uint8x16AddSaturateJS,
- 'subSaturate', Uint8x16SubSaturateJS,
- 'mul', Uint8x16MulJS,
- 'min', Uint8x16MinJS,
- 'max', Uint8x16MaxJS,
- 'and', Uint8x16AndJS,
- 'or', Uint8x16OrJS,
- 'xor', Uint8x16XorJS,
- 'not', Uint8x16NotJS,
- 'shiftLeftByScalar', Uint8x16ShiftLeftByScalarJS,
- 'shiftRightByScalar', Uint8x16ShiftRightByScalarJS,
- 'lessThan', Uint8x16LessThanJS,
- 'lessThanOrEqual', Uint8x16LessThanOrEqualJS,
- 'greaterThan', Uint8x16GreaterThanJS,
- 'greaterThanOrEqual', Uint8x16GreaterThanOrEqualJS,
- 'equal', Uint8x16EqualJS,
- 'notEqual', Uint8x16NotEqualJS,
- 'select', Uint8x16SelectJS,
- 'swizzle', Uint8x16SwizzleJS,
- 'shuffle', Uint8x16ShuffleJS,
- 'fromInt8x16', Uint8x16FromInt8x16JS,
- 'fromFloat32x4Bits', Uint8x16FromFloat32x4BitsJS,
- 'fromInt32x4Bits', Uint8x16FromInt32x4BitsJS,
- 'fromUint32x4Bits', Uint8x16FromUint32x4BitsJS,
- 'fromInt16x8Bits', Uint8x16FromInt16x8BitsJS,
- 'fromUint16x8Bits', Uint8x16FromUint16x8BitsJS,
- 'fromInt8x16Bits', Uint8x16FromInt8x16BitsJS,
- 'load', Uint8x16LoadJS,
- 'store', Uint8x16StoreJS,
-]);
-
-utils.InstallFunctions(GlobalBool8x16, DONT_ENUM, [
- 'splat', Bool8x16Splat,
- 'check', Bool8x16CheckJS,
- 'extractLane', Bool8x16ExtractLaneJS,
- 'replaceLane', Bool8x16ReplaceLaneJS,
- 'and', Bool8x16AndJS,
- 'or', Bool8x16OrJS,
- 'xor', Bool8x16XorJS,
- 'not', Bool8x16NotJS,
- 'anyTrue', Bool8x16AnyTrueJS,
- 'allTrue', Bool8x16AllTrueJS,
- 'swizzle', Bool8x16SwizzleJS,
- 'shuffle', Bool8x16ShuffleJS,
-]);
-
-})
diff --git a/deps/v8/src/js/i18n.js b/deps/v8/src/js/i18n.js
index 50ed5bcb89..e6b8ba5709 100644
--- a/deps/v8/src/js/i18n.js
+++ b/deps/v8/src/js/i18n.js
@@ -153,6 +153,13 @@ var DEFAULT_ICU_LOCALE = UNDEFINED;
function GetDefaultICULocaleJS() {
if (IS_UNDEFINED(DEFAULT_ICU_LOCALE)) {
DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
+ // Check that this is a valid default; otherwise fall back to "und".
+ for (let service in AVAILABLE_LOCALES) {
+ if (IS_UNDEFINED(getAvailableLocalesOf(service)[DEFAULT_ICU_LOCALE])) {
+ DEFAULT_ICU_LOCALE = "und";
+ break;
+ }
+ }
}
return DEFAULT_ICU_LOCALE;
}
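
A standalone rendering of the new fallback logic: the lazily computed default is accepted only if every i18n service reports it as available; otherwise the root locale "und" is used. (The service list and availability lookup below are stand-ins for the caches used above.)

    function pickDefaultLocale(candidate, services, availableOf) {
      for (const service of services) {
        if (availableOf(service)[candidate] === undefined) {
          return "und";  // candidate missing for some service: fall back
        }
      }
      return candidate;
    }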
@@ -298,19 +305,16 @@ function supportedLocalesOf(service, locales, options) {
var requestedLocales = initializeLocaleList(locales);
- // Cache these, they don't ever change per service.
- if (IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
- AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
- }
+ var availableLocales = getAvailableLocalesOf(service);
// Use either best fit or lookup algorithm to match locales.
if (matcher === 'best fit') {
return initializeLocaleList(bestFitSupportedLocalesOf(
- requestedLocales, AVAILABLE_LOCALES[service]));
+ requestedLocales, availableLocales));
}
return initializeLocaleList(lookupSupportedLocalesOf(
- requestedLocales, AVAILABLE_LOCALES[service]));
+ requestedLocales, availableLocales));
}
@@ -437,17 +441,14 @@ function lookupMatcher(service, requestedLocales) {
throw %make_error(kWrongServiceType, service);
}
- // Cache these, they don't ever change per service.
- if (IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
- AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
- }
+ var availableLocales = getAvailableLocalesOf(service);
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove all extensions.
var locale = %RegExpInternalReplace(
GetAnyExtensionRE(), requestedLocales[i], '');
do {
- if (!IS_UNDEFINED(AVAILABLE_LOCALES[service][locale])) {
+ if (!IS_UNDEFINED(availableLocales[locale])) {
// Return the resolved locale and extension.
var extensionMatch = %regexp_internal_match(
GetUnicodeExtensionRE(), requestedLocales[i]);
@@ -658,6 +659,11 @@ function getOptimalLanguageTag(original, resolved) {
* that is supported. This is required by the spec.
*/
function getAvailableLocalesOf(service) {
+ // Cache these, they don't ever change per service.
+ if (!IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
+ return AVAILABLE_LOCALES[service];
+ }
+
var available = %AvailableLocalesOf(service);
for (var i in available) {
@@ -672,6 +678,8 @@ function getAvailableLocalesOf(service) {
}
}
+ AVAILABLE_LOCALES[service] = available;
+
return available;
}
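
The caching previously duplicated in supportedLocalesOf and lookupMatcher is now centralized here, so every caller shares one memoized table per service. The shape of that memoization, as a plain-JS sketch (fetchAvailableLocales stands in for %AvailableLocalesOf plus the filtering loop above):

    const AVAILABLE_LOCALES = {};
    function getAvailableLocalesOf(service, fetchAvailableLocales) {
      // Cache per service; the available set never changes at runtime.
      if (AVAILABLE_LOCALES[service] !== undefined) {
        return AVAILABLE_LOCALES[service];
      }
      const available = fetchAvailableLocales(service);
      AVAILABLE_LOCALES[service] = available;
      return available;
    }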
@@ -723,8 +731,8 @@ function addWECPropertyIfDefined(object, property, value) {
* Returns titlecased word, aMeRricA -> America.
*/
function toTitleCaseWord(word) {
- return %StringToUpperCase(%_Call(StringSubstr, word, 0, 1)) +
- %StringToLowerCase(%_Call(StringSubstr, word, 1));
+ return %StringToUpperCaseI18N(%_Call(StringSubstr, word, 0, 1)) +
+ %StringToLowerCaseI18N(%_Call(StringSubstr, word, 1));
}
/**
@@ -745,7 +753,7 @@ function toTitleCaseTimezoneLocation(location) {
var parts = %StringSplit(match[2], separator, kMaxUint32);
for (var i = 1; i < parts.length; i++) {
var part = parts[i]
- var lowercasedPart = %StringToLowerCase(part);
+ var lowercasedPart = %StringToLowerCaseI18N(part);
result = result + separator +
((lowercasedPart !== 'es' &&
lowercasedPart !== 'of' && lowercasedPart !== 'au') ?
@@ -851,6 +859,8 @@ function isStructuallyValidLanguageTag(locale) {
return false;
}
+ locale = %StringToLowerCaseI18N(locale);
+
// Just return if it's a x- form. It's all private.
if (%StringIndexOf(locale, 'x-', 0) === 0) {
return true;
@@ -1177,7 +1187,7 @@ function CreateNumberFormat(locales, options) {
var currencyDisplay = getOption(
'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
if (internalOptions.style === 'currency') {
- defineWEProperty(internalOptions, 'currency', %StringToUpperCase(currency));
+ defineWEProperty(internalOptions, 'currency', %StringToUpperCaseI18N(currency));
defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
}
@@ -1203,7 +1213,7 @@ function CreateNumberFormat(locales, options) {
var mnsd = options['minimumSignificantDigits'];
var mxsd = options['maximumSignificantDigits'];
if (!IS_UNDEFINED(mnsd) || !IS_UNDEFINED(mxsd)) {
- mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 0);
+ mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 1);
defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
mxsd = getNumberOption(options, 'maximumSignificantDigits', mnsd, 21, 21);
@@ -1766,7 +1776,7 @@ function canonicalizeTimeZoneID(tzID) {
tzID = TO_STRING(tzID);
// Special case handling (UTC, GMT).
- var upperID = %StringToUpperCase(tzID);
+ var upperID = %StringToUpperCaseI18N(tzID);
if (upperID === 'UTC' || upperID === 'GMT' ||
upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
return 'UTC';
@@ -2051,6 +2061,7 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
}
);
+// TODO(littledan): Rewrite these two functions as C++ builtins
function ToLowerCaseI18N() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
return %StringToLowerCaseI18N(TO_STRING(this));
@@ -2075,18 +2086,6 @@ function ToLocaleUpperCaseI18N(locales) {
%FunctionSetLength(ToLocaleUpperCaseI18N, 0);
-%FunctionRemovePrototype(ToLowerCaseI18N);
-%FunctionRemovePrototype(ToUpperCaseI18N);
-%FunctionRemovePrototype(ToLocaleLowerCaseI18N);
-%FunctionRemovePrototype(ToLocaleUpperCaseI18N);
-
-utils.Export(function(to) {
- to.ToLowerCaseI18N = ToLowerCaseI18N;
- to.ToUpperCaseI18N = ToUpperCaseI18N;
- to.ToLocaleLowerCaseI18N = ToLocaleLowerCaseI18N;
- to.ToLocaleUpperCaseI18N = ToLocaleUpperCaseI18N;
-});
-
/**
* Formats a Number object (this) using locale and options values.
@@ -2167,9 +2166,23 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
);
%FunctionRemovePrototype(FormatDateToParts);
+%FunctionRemovePrototype(ToLowerCaseI18N);
+%FunctionRemovePrototype(ToUpperCaseI18N);
+%FunctionRemovePrototype(ToLocaleLowerCaseI18N);
+%FunctionRemovePrototype(ToLocaleUpperCaseI18N);
+
+utils.SetFunctionName(FormatDateToParts, "formatToParts");
+utils.SetFunctionName(ToLowerCaseI18N, "toLowerCase");
+utils.SetFunctionName(ToUpperCaseI18N, "toUpperCase");
+utils.SetFunctionName(ToLocaleLowerCaseI18N, "toLocaleLowerCase");
+utils.SetFunctionName(ToLocaleUpperCaseI18N, "toLocaleUpperCase");
utils.Export(function(to) {
to.FormatDateToParts = FormatDateToParts;
+ to.ToLowerCaseI18N = ToLowerCaseI18N;
+ to.ToUpperCaseI18N = ToUpperCaseI18N;
+ to.ToLocaleLowerCaseI18N = ToLocaleLowerCaseI18N;
+ to.ToLocaleUpperCaseI18N = ToLocaleUpperCaseI18N;
});
})
diff --git a/deps/v8/src/js/icu-case-mapping.js b/deps/v8/src/js/icu-case-mapping.js
deleted file mode 100644
index 9806249d71..0000000000
--- a/deps/v8/src/js/icu-case-mapping.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalString = global.String;
-var OverrideFunction = utils.OverrideFunction;
-var ToLowerCaseI18N = utils.ImportNow("ToLowerCaseI18N");
-var ToUpperCaseI18N = utils.ImportNow("ToUpperCaseI18N");
-var ToLocaleLowerCaseI18N = utils.ImportNow("ToLocaleLowerCaseI18N");
-var ToLocaleUpperCaseI18N = utils.ImportNow("ToLocaleUpperCaseI18N");
-
-OverrideFunction(GlobalString.prototype, 'toLowerCase', ToLowerCaseI18N, true);
-OverrideFunction(GlobalString.prototype, 'toUpperCase', ToUpperCaseI18N, true);
-OverrideFunction(GlobalString.prototype, 'toLocaleLowerCase',
- ToLocaleLowerCaseI18N, true);
-OverrideFunction(GlobalString.prototype, 'toLocaleUpperCase',
- ToLocaleUpperCaseI18N, true);
-
-})
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index 955c89fd07..f10da42c65 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -64,7 +64,6 @@ macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
-macro IS_SIMD_VALUE(arg) = (%IsSimdValue(arg));
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
macro IS_TYPEDARRAY(arg) = (%_IsTypedArray(arg));
@@ -140,11 +139,11 @@ macro FIXED_ARRAY_SET(array, index, value) = (%_FixedArraySet(array, (index) | 0
# TODO(adamk): Find a more robust way to force Smi representation.
macro FIXED_ARRAY_SET_SMI(array, index, value) = (FIXED_ARRAY_SET(array, index, (value) | 0));
-macro ORDERED_HASH_TABLE_BUCKET_COUNT(table) = (FIXED_ARRAY_GET(table, 0));
-macro ORDERED_HASH_TABLE_ELEMENT_COUNT(table) = (FIXED_ARRAY_GET(table, 1));
-macro ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 1, count));
-macro ORDERED_HASH_TABLE_DELETED_COUNT(table) = (FIXED_ARRAY_GET(table, 2));
-macro ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 2, count));
+macro ORDERED_HASH_TABLE_BUCKET_COUNT(table) = (FIXED_ARRAY_GET(table, 2));
+macro ORDERED_HASH_TABLE_ELEMENT_COUNT(table) = (FIXED_ARRAY_GET(table, 0));
+macro ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 0, count));
+macro ORDERED_HASH_TABLE_DELETED_COUNT(table) = (FIXED_ARRAY_GET(table, 1));
+macro ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 1, count));
macro ORDERED_HASH_TABLE_BUCKET_AT(table, bucket) = (FIXED_ARRAY_GET(table, 3 + (bucket)));
macro ORDERED_HASH_TABLE_SET_BUCKET_AT(table, bucket, entry) = (FIXED_ARRAY_SET(table, 3 + (bucket), entry));
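
Read together, the swapped indices imply this layout for the fixed array backing an ordered hash table (an inference from the macros above, not from the C++ headers): slot 0 now holds the element count, slot 1 the deleted count, slot 2 the bucket count, and buckets still start at slot 3.

    // Layout implied by the updated macros (illustrative constants):
    const kNumberOfElementsIndex = 0;         // was 1
    const kNumberOfDeletedElementsIndex = 1;  // was 2
    const kNumberOfBucketsIndex = 2;          // was 0
    const kHashTableStartIndex = 3;           // first bucket slot, unchanged
    function bucketAt(table, bucket) {
      return table[kHashTableStartIndex + bucket];
    }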
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index e51ab558b6..4b40d9a0fd 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -169,11 +169,9 @@ function PostNatives(utils) {
var expose_list = [
"FormatDateToParts",
"MapEntries",
- "MapIterator",
"MapIteratorNext",
"MaxSimple",
"MinSimple",
- "SetIterator",
"SetIteratorNext",
"SetValues",
"ToLocaleLowerCaseI18N",
@@ -203,7 +201,6 @@ function PostNatives(utils) {
function PostExperimentals(utils) {
%CheckIsBootstrapping();
- %ExportExperimentalFromRuntime(exports_container);
for ( ; !IS_UNDEFINED(imports); imports = imports.next) {
imports(exports_container);
}
diff --git a/deps/v8/src/js/promise.js b/deps/v8/src/js/promise.js
index 95ab793591..27571daabb 100644
--- a/deps/v8/src/js/promise.js
+++ b/deps/v8/src/js/promise.js
@@ -19,14 +19,6 @@ var promiseForwardingHandlerSymbol =
var GlobalPromise = global.Promise;
// -------------------------------------------------------------------
-
-// Core functionality.
-
-function PromiseIdResolveHandler(x) { return x; }
-function PromiseIdRejectHandler(r) { %_ReThrow(r); }
-SET_PRIVATE(PromiseIdRejectHandler, promiseForwardingHandlerSymbol, true);
-
-// -------------------------------------------------------------------
// Define exported functions.
// Combinators.
@@ -137,9 +129,4 @@ utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
"race", PromiseRace,
]);
-%InstallToContext([
- "promise_id_resolve_handler", PromiseIdResolveHandler,
- "promise_id_reject_handler", PromiseIdRejectHandler
-]);
-
})
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index c0587350cd..5992f80201 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -9,21 +9,9 @@
// -------------------------------------------------------------------
// Imports
-var ArrayJoin;
-var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
-var MaxSimple;
-var MinSimple;
var matchSymbol = utils.ImportNow("match_symbol");
-var replaceSymbol = utils.ImportNow("replace_symbol");
var searchSymbol = utils.ImportNow("search_symbol");
-var splitSymbol = utils.ImportNow("split_symbol");
-
-utils.Import(function(from) {
- ArrayJoin = from.ArrayJoin;
- MaxSimple = from.MaxSimple;
- MinSimple = from.MinSimple;
-});
//-------------------------------------------------------------------
@@ -58,154 +46,6 @@ function StringMatchJS(pattern) {
return regexp[matchSymbol](subject);
}
-// ES#sec-getsubstitution
-// GetSubstitution(matched, str, position, captures, replacement)
-// Expand the $-expressions in the string and return a new string with
-// the result.
-function GetSubstitution(matched, string, position, captures, replacement) {
- var matchLength = matched.length;
- var stringLength = string.length;
- var capturesLength = captures.length;
- var tailPos = position + matchLength;
- var result = "";
- var pos, expansion, peek, next, scaledIndex, advance, newScaledIndex;
-
- var next = %StringIndexOf(replacement, '$', 0);
- if (next < 0) {
- result += replacement;
- return result;
- }
-
- if (next > 0) result += %_SubString(replacement, 0, next);
-
- while (true) {
- expansion = '$';
- pos = next + 1;
- if (pos < replacement.length) {
- peek = %_StringCharCodeAt(replacement, pos);
- if (peek == 36) { // $$
- ++pos;
- result += '$';
- } else if (peek == 38) { // $& - match
- ++pos;
- result += matched;
- } else if (peek == 96) { // $` - prefix
- ++pos;
- result += %_SubString(string, 0, position);
- } else if (peek == 39) { // $' - suffix
- ++pos;
- result += %_SubString(string, tailPos, stringLength);
- } else if (peek >= 48 && peek <= 57) {
- // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
- scaledIndex = (peek - 48);
- advance = 1;
- if (pos + 1 < replacement.length) {
- next = %_StringCharCodeAt(replacement, pos + 1);
- if (next >= 48 && next <= 57) {
- newScaledIndex = scaledIndex * 10 + ((next - 48));
- if (newScaledIndex < capturesLength) {
- scaledIndex = newScaledIndex;
- advance = 2;
- }
- }
- }
- if (scaledIndex != 0 && scaledIndex < capturesLength) {
- var capture = captures.at(scaledIndex);
- if (!IS_UNDEFINED(capture)) result += capture;
- pos += advance;
- } else {
- result += '$';
- }
- } else {
- result += '$';
- }
- } else {
- result += '$';
- }
-
- // Go the the next $ in the replacement.
- next = %StringIndexOf(replacement, '$', pos);
-
- // Return if there are no more $ characters in the replacement. If we
- // haven't reached the end, we need to append the suffix.
- if (next < 0) {
- if (pos < replacement.length) {
- result += %_SubString(replacement, pos, replacement.length);
- }
- return result;
- }
-
- // Append substring between the previous and the next $ character.
- if (next > pos) {
- result += %_SubString(replacement, pos, next);
- }
- }
- return result;
-}
-
-// ES6, section 21.1.3.14
-function StringReplace(search, replace) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.replace");
-
- // Decision tree for dispatch
- // .. regexp search (in src/js/regexp.js, RegExpReplace)
- // .... string replace
- // ...... non-global search
- // ........ empty string replace
- // ........ non-empty string replace (with $-expansion)
- // ...... global search
- // ........ no need to circumvent last match info override
- // ........ need to circument last match info override
- // .... function replace
- // ...... global search
- // ...... non-global search
- // .. string search
- // .... special case that replaces with one single character
- // ...... function replace
- // ...... string replace (with $-expansion)
-
- if (!IS_NULL_OR_UNDEFINED(search)) {
- var replacer = search[replaceSymbol];
- if (!IS_UNDEFINED(replacer)) {
- return %_Call(replacer, search, this, replace);
- }
- }
-
- var subject = TO_STRING(this);
-
- search = TO_STRING(search);
-
- if (search.length == 1 &&
- subject.length > 0xFF &&
- IS_STRING(replace) &&
- %StringIndexOf(replace, '$', 0) < 0) {
- // Searching by traversing a cons string tree and replace with cons of
- // slices works only when the replaced string is a single character, being
- // replaced by a simple string and only pays off for long strings.
- return %StringReplaceOneCharWithString(subject, search, replace);
- }
- var start = %StringIndexOf(subject, search, 0);
- if (start < 0) return subject;
- var end = start + search.length;
-
- var result = %_SubString(subject, 0, start);
-
- // Compute the string to replace with.
- if (IS_CALLABLE(replace)) {
- result += replace(search, start, subject);
- } else {
- // In this case, we don't have any capture groups and can get away with
- // faking the captures object by simply setting its length to 1.
- const captures = { length: 1 };
- const matched = %_SubString(subject, start, end);
- result += GetSubstitution(matched, subject, start, captures,
- TO_STRING(replace));
- }
-
- return result + %_SubString(subject, end, subject.length);
-}
-
-
// ES6 21.1.3.15.
function StringSearch(pattern) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
@@ -267,70 +107,6 @@ function StringSlice(start, end) {
}
-// ES6 21.1.3.17.
-function StringSplitJS(separator, limit) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.split");
-
- if (!IS_NULL_OR_UNDEFINED(separator)) {
- var splitter = separator[splitSymbol];
- if (!IS_UNDEFINED(splitter)) {
- return %_Call(splitter, separator, this, limit);
- }
- }
-
- var subject = TO_STRING(this);
- limit = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
-
- var length = subject.length;
- var separator_string = TO_STRING(separator);
-
- if (limit === 0) return [];
-
- // ECMA-262 says that if separator is undefined, the result should
- // be an array of size 1 containing the entire string.
- if (IS_UNDEFINED(separator)) return [subject];
-
- var separator_length = separator_string.length;
-
- // If the separator string is empty then return the elements in the subject.
- if (separator_length === 0) return %StringToArray(subject, limit);
-
- return %StringSplit(subject, separator_string, limit);
-}
-
-
-// ECMA-262, 15.5.4.16
-function StringToLowerCaseJS() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
-
- return %StringToLowerCase(TO_STRING(this));
-}
-
-
-// ECMA-262, 15.5.4.17
-function StringToLocaleLowerCase() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
-
- return %StringToLowerCase(TO_STRING(this));
-}
-
-
-// ECMA-262, 15.5.4.18
-function StringToUpperCaseJS() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase");
-
- return %StringToUpperCase(TO_STRING(this));
-}
-
-
-// ECMA-262, 15.5.4.19
-function StringToLocaleUpperCase() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
-
- return %StringToUpperCase(TO_STRING(this));
-}
-
-
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
function HtmlEscape(str) {
return %RegExpInternalReplace(/"/g, TO_STRING(str), "&quot;");
@@ -515,14 +291,8 @@ utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
"concat", StringConcat,
"match", StringMatchJS,
"repeat", StringRepeat,
- "replace", StringReplace,
"search", StringSearch,
"slice", StringSlice,
- "split", StringSplitJS,
- "toLowerCase", StringToLowerCaseJS,
- "toLocaleLowerCase", StringToLocaleLowerCase,
- "toUpperCase", StringToUpperCaseJS,
- "toLocaleUpperCase", StringToLocaleUpperCase,
"link", StringLink,
"anchor", StringAnchor,
@@ -539,14 +309,4 @@ utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
"sup", StringSup
]);
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.StringMatch = StringMatchJS;
- to.StringReplace = StringReplace;
- to.StringSlice = StringSlice;
- to.StringSplit = StringSplitJS;
-});
-
})
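
StringReplace and its GetSubstitution helper leave the JS natives here; the $-expansion semantics they implemented are unchanged and observable through the public API. A few illustrative cases (plain JS, using the built-in replace for comparison only):

    "abc".replace("b", "[$&]");   // "a[b]c"  -- $& is the matched substring
    "abc".replace("b", "$`/$'");  // "aa/cc"  -- $` prefix, $' suffix
    "abc".replace("b", "$$");     // "a$c"    -- $$ is a literal dollar sign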
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 3a5cb84755..ef1c1d29e7 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -20,7 +20,6 @@ var GlobalArray = global.Array;
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
var GlobalObject = global.Object;
-var InnerArrayCopyWithin;
var InnerArrayEvery;
var InnerArrayFill;
var InnerArrayFilter;
@@ -68,7 +67,6 @@ utils.Import(function(from) {
ArrayValues = from.ArrayValues;
GetIterator = from.GetIterator;
GetMethod = from.GetMethod;
- InnerArrayCopyWithin = from.InnerArrayCopyWithin;
InnerArrayEvery = from.InnerArrayEvery;
InnerArrayFill = from.InnerArrayFill;
InnerArrayFilter = from.InnerArrayFilter;
@@ -163,8 +161,7 @@ function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
}
newByteLength = bufferByteLength - offset;
if (newByteLength < 0) {
- throw %make_range_error(kInvalidTypedArrayAlignment,
- "byte length", "NAME", ELEMENT_SIZE);
+ throw %make_range_error(kInvalidOffset, offset);
}
} else {
newByteLength = length * ELEMENT_SIZE;
@@ -438,17 +435,6 @@ function TypedArrayGetToStringTag() {
}
-function TypedArrayCopyWithin(target, start, end) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- // TODO(littledan): Replace with a memcpy for better performance
- return InnerArrayCopyWithin(target, start, end, this, length);
-}
-%FunctionSetLength(TypedArrayCopyWithin, 2);
-
-
// ES6 draft 05-05-15, section 22.2.3.7
function TypedArrayEvery(f, receiver) {
if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
@@ -531,25 +517,6 @@ function TypedArrayReverse() {
return PackedArrayReverse(this, length);
}
-
-function TypedArrayComparefn(x, y) {
- if (x === 0 && x === y) {
- x = 1 / x;
- y = 1 / y;
- }
- if (x < y) {
- return -1;
- } else if (x > y) {
- return 1;
- } else if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) {
- return NUMBER_IS_NAN(y) ? 0 : 1;
- } else if (NUMBER_IS_NAN(x)) {
- return 1;
- }
- return 0;
-}
-
-
// ES6 draft 05-18-15, section 22.2.3.25
function TypedArraySort(comparefn) {
if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
@@ -557,7 +524,7 @@ function TypedArraySort(comparefn) {
var length = %_TypedArrayGetLength(this);
if (IS_UNDEFINED(comparefn)) {
- comparefn = TypedArrayComparefn;
+ return %TypedArraySortFast(this);
}
return InnerArraySort(this, length, comparefn);
@@ -858,7 +825,6 @@ utils.InstallGetter(GlobalTypedArray.prototype, toStringTagSymbol,
utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
"subarray", TypedArraySubArray,
"set", TypedArraySet,
- "copyWithin", TypedArrayCopyWithin,
"every", TypedArrayEvery,
"fill", TypedArrayFill,
"filter", TypedArrayFilter,
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 6c65234d50..2ec79caefc 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -399,8 +399,8 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
->NowContains(value)) {
Handle<FieldType> value_type(
value->OptimalType(isolate(), expected_representation));
- Map::GeneralizeField(target, descriptor, expected_representation,
- value_type);
+ Map::GeneralizeField(target, descriptor, details.constness(),
+ expected_representation, value_type);
}
DCHECK(target->instance_descriptors()
->GetFieldType(descriptor)
@@ -478,12 +478,12 @@ void JsonParser<seq_one_byte>::CommitStateToJsonObject(
DCHECK(!json_object->map()->is_dictionary_map());
DisallowHeapAllocation no_gc;
-
+ DescriptorArray* descriptors = json_object->map()->instance_descriptors();
int length = properties->length();
for (int i = 0; i < length; i++) {
Handle<Object> value = (*properties)[i];
// Initializing store.
- json_object->WriteToField(i, *value);
+ json_object->WriteToField(i, descriptors->GetDetails(i), *value);
}
}
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index b91b57142a..a187fb54bd 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -323,7 +323,6 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
case JS_VALUE_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
return SerializeJSValue(Handle<JSValue>::cast(object));
- case SIMD128_VALUE_TYPE:
case SYMBOL_TYPE:
return UNCHANGED;
default:
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index 35ca22301f..af3d393f07 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -227,7 +227,7 @@ void TrySettingEmptyEnumCache(JSReceiver* object) {
map->SetEnumLength(0);
}
-bool CheckAndInitalizeSimpleEnumCache(JSReceiver* object) {
+bool CheckAndInitalizeEmptyEnumCache(JSReceiver* object) {
if (object->map()->EnumLength() == kInvalidEnumCacheSentinel) {
TrySettingEmptyEnumCache(object);
}
@@ -248,7 +248,7 @@ void FastKeyAccumulator::Prepare() {
for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd();
iter.Advance()) {
JSReceiver* current = iter.GetCurrent<JSReceiver>();
- bool has_no_properties = CheckAndInitalizeSimpleEnumCache(current);
+ bool has_no_properties = CheckAndInitalizeEmptyEnumCache(current);
if (has_no_properties) continue;
last_prototype = current;
has_empty_prototype_ = false;
@@ -271,6 +271,8 @@ static Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
return isolate->factory()->CopyFixedArrayUpTo(array, length);
}
+// Initializes and directly returns the enum cache. Users of this function
+// have to make sure to never directly leak the enum cache.
Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Handle<JSObject> object) {
Handle<Map> map(object->map());
@@ -370,25 +372,6 @@ MaybeHandle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
return result;
}
-MaybeHandle<FixedArray> GetOwnKeysWithUninitializedEnumCache(
- Isolate* isolate, Handle<JSObject> object) {
- // Uninitalized enum cache
- Map* map = object->map();
- if (object->elements() != isolate->heap()->empty_fixed_array() ||
- object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
- // Assume that there are elements.
- return MaybeHandle<FixedArray>();
- }
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- if (number_of_own_descriptors == 0) {
- map->SetEnumLength(0);
- return isolate->factory()->empty_fixed_array();
- }
- // We have no elements but possibly enumerable property keys, hence we can
- // directly initialize the enum cache.
- return GetFastEnumPropertyKeys(isolate, object);
-}
-
bool OnlyHasSimpleProperties(Map* map) {
return map->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER;
}
@@ -428,8 +411,7 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
if (enum_length == kInvalidEnumCacheSentinel) {
Handle<FixedArray> keys;
// Try initializing the enum cache and return own properties.
- if (GetOwnKeysWithUninitializedEnumCache(isolate_, object)
- .ToHandle(&keys)) {
+ if (GetOwnKeysWithUninitializedEnumCache().ToHandle(&keys)) {
if (FLAG_trace_for_in_enumerate) {
PrintF("| strings=%d symbols=0 elements=0 || prototypes>=1 ||\n",
keys->length());
@@ -444,6 +426,28 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
return GetOwnKeysWithElements<true>(isolate_, object, keys_conversion);
}
+MaybeHandle<FixedArray>
+FastKeyAccumulator::GetOwnKeysWithUninitializedEnumCache() {
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver_);
+ // Uninitialized enum cache.
+ Map* map = object->map();
+ if (object->elements()->length() != 0) {
+ // Assume that there are elements.
+ return MaybeHandle<FixedArray>();
+ }
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors == 0) {
+ map->SetEnumLength(0);
+ return isolate_->factory()->empty_fixed_array();
+ }
+ // We have no elements but possibly enumerable property keys, hence we can
+ // directly initialize the enum cache.
+ Handle<FixedArray> keys = GetFastEnumPropertyKeys(isolate_, object);
+ if (is_for_in_) return keys;
+ // Do not leak the enum cache as it might end up as an elements backing store.
+ return isolate_->factory()->CopyFixedArray(keys);
+}
+
MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
GetKeysConversion keys_conversion) {
KeyAccumulator accumulator(isolate_, mode_, filter_);
@@ -798,7 +802,8 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
Zone set_zone(isolate_->allocator(), ZONE_NAME);
const int kPresent = 1;
const int kGone = 0;
- IdentityMap<int> unchecked_result_keys(isolate_->heap(), &set_zone);
+ IdentityMap<int, ZoneAllocationPolicy> unchecked_result_keys(
+ isolate_->heap(), ZoneAllocationPolicy(&set_zone));
int unchecked_result_keys_size = 0;
for (int i = 0; i < trap_result->length(); ++i) {
DCHECK(trap_result->get(i)->IsUniqueName());
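
The relocated GetOwnKeysWithUninitializedEnumCache also gains the leak protection the new comment above GetFastEnumPropertyKeys asks for: the cached enum keys are handed out directly only to for-in, which never mutates them; every other caller gets a copy. The pattern, as a plain-JS sketch with a hypothetical cache helper:

    function ownKeys(object, isForIn) {
      const keys = getOrInitEnumCache(object);  // shared cached array (stand-in)
      if (isForIn) return keys;  // for-in only reads the keys
      return keys.slice();       // other callers may mutate: hand out a copy
    }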
diff --git a/deps/v8/src/keys.h b/deps/v8/src/keys.h
index 63b8b26ce2..c5ac93c098 100644
--- a/deps/v8/src/keys.h
+++ b/deps/v8/src/keys.h
@@ -53,9 +53,10 @@ class KeyAccumulator final BASE_EMBEDDED {
Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
Handle<JSObject> object);
+ // Might directly return the object's enum_cache; copy the result before
+ // using it as an elements backing store for a JSObject.
static Handle<FixedArray> GetOwnEnumPropertyKeys(Isolate* isolate,
Handle<JSObject> object);
-
void AddKey(Object* key, AddKeyConversion convert = DO_NOT_CONVERT);
void AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
@@ -140,6 +141,8 @@ class FastKeyAccumulator {
MaybeHandle<FixedArray> GetKeysFast(GetKeysConversion convert);
MaybeHandle<FixedArray> GetKeysSlow(GetKeysConversion convert);
+ MaybeHandle<FixedArray> GetOwnKeysWithUninitializedEnumCache();
+
Isolate* isolate_;
Handle<JSReceiver> receiver_;
Handle<JSReceiver> last_non_empty_prototype_;
diff --git a/deps/v8/src/label.h b/deps/v8/src/label.h
new file mode 100644
index 0000000000..e77a2afcc1
--- /dev/null
+++ b/deps/v8/src/label.h
@@ -0,0 +1,92 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LABEL_H_
+#define V8_LABEL_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Labels represent pc locations; they are typically jump or call targets.
+// After declaration, a label can be freely used to denote known or (yet)
+// unknown pc location. Assembler::bind() is used to bind a label to the
+// current pc. A label can be bound only once.
+
+class Label {
+ public:
+ enum Distance { kNear, kFar };
+
+ INLINE(Label()) {
+ Unuse();
+ UnuseNear();
+ }
+
+ INLINE(~Label()) {
+ DCHECK(!is_linked());
+ DCHECK(!is_near_linked());
+ }
+
+ INLINE(void Unuse()) { pos_ = 0; }
+ INLINE(void UnuseNear()) { near_link_pos_ = 0; }
+
+ INLINE(bool is_bound() const) { return pos_ < 0; }
+ INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
+ INLINE(bool is_linked() const) { return pos_ > 0; }
+ INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
+
+ // Returns the position of bound or linked labels. Cannot be used
+ // for unused labels.
+ int pos() const {
+ if (pos_ < 0) return -pos_ - 1;
+ if (pos_ > 0) return pos_ - 1;
+ UNREACHABLE();
+ return 0;
+ }
+
+ int near_link_pos() const { return near_link_pos_ - 1; }
+
+ private:
+ // pos_ encodes both the binding state (via its sign)
+ // and the binding position (via its value) of a label.
+ //
+ // pos_ < 0 bound label, pos() returns the jump target position
+ // pos_ == 0 unused label
+ // pos_ > 0 linked label, pos() returns the last reference position
+ int pos_;
+
+ // Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
+ int near_link_pos_;
+
+ void bind_to(int pos) {
+ pos_ = -pos - 1;
+ DCHECK(is_bound());
+ }
+ void link_to(int pos, Distance distance = kFar) {
+ if (distance == kNear) {
+ near_link_pos_ = pos + 1;
+ DCHECK(is_near_linked());
+ } else {
+ pos_ = pos + 1;
+ DCHECK(is_linked());
+ }
+ }
+
+ friend class Assembler;
+ friend class Displacement;
+ friend class RegExpMacroAssemblerIrregexp;
+
+#if V8_TARGET_ARCH_ARM64
+ // On ARM64, the Assembler keeps track of pointers to Labels to resolve
+ // branches to distant targets. Copying labels would confuse the Assembler.
+ DISALLOW_COPY_AND_ASSIGN(Label); // NOLINT
+#endif
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LABEL_H_
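
The sign/offset encoding documented in the comments above can be restated as a tiny decoder (plain JS for illustration; in the C++ class this is a single int field):

    function decodeLabel(pos_) {
      if (pos_ < 0) return { state: "bound",  pos: -pos_ - 1 };
      if (pos_ > 0) return { state: "linked", pos:  pos_ - 1 };
      return { state: "unused" };
    }
    // bind_to(42) stores -43 -> bound at 42; link_to(42) stores 43 -> linked at 42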
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index bc414e848d..69fa6f902c 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -32,7 +32,7 @@ class Log {
return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
FLAG_log_handles || FLAG_log_suspect || FLAG_ll_prof ||
FLAG_perf_basic_prof || FLAG_perf_prof ||
- FLAG_log_internal_timer_events || FLAG_prof_cpp;
+ FLAG_log_internal_timer_events || FLAG_prof_cpp || FLAG_trace_ic;
}
// Frees all resources acquired in Initialize and Open... functions.
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 0095cf43a7..8994147ae0 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1249,28 +1249,6 @@ void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
}
-void Logger::DebugTag(const char* call_site_tag) {
- if (!log_->IsEnabled() || !FLAG_log) return;
- Log::MessageBuilder msg(log_);
- msg.Append("debug-tag,%s", call_site_tag);
- msg.WriteToLogFile();
-}
-
-
-void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
- if (!log_->IsEnabled() || !FLAG_log) return;
- StringBuilder s(parameter.length() + 1);
- for (int i = 0; i < parameter.length(); ++i) {
- s.AddCharacter(static_cast<char>(parameter[i]));
- }
- char* parameter_string = s.Finalize();
- Log::MessageBuilder msg(log_);
- msg.Append("debug-queue-event,%s,%15.3f,%s", event_type,
- base::OS::TimeCurrentMillis(), parameter_string);
- DeleteArray(parameter_string);
- msg.WriteToLogFile();
-}
-
void Logger::RuntimeCallTimerEvent() {
RuntimeCallStats* stats = isolate_->counters()->runtime_call_stats();
RuntimeCallTimer* timer = stats->current_timer();
@@ -1312,6 +1290,93 @@ void Logger::TickEvent(v8::TickSample* sample, bool overflow) {
msg.WriteToLogFile();
}
+void Logger::ICEvent(const char* type, bool keyed, const Address pc, int line,
+ int column, Map* map, Object* key, char old_state,
+ char new_state, const char* modifier,
+ const char* slow_stub_reason) {
+ if (!log_->IsEnabled() || !FLAG_trace_ic) return;
+ Log::MessageBuilder msg(log_);
+ if (keyed) msg.Append("Keyed");
+ msg.Append("%s,", type);
+ msg.AppendAddress(pc);
+ msg.Append(",%d,%d,", line, column);
+ msg.Append(old_state);
+ msg.Append(",");
+ msg.Append(new_state);
+ msg.Append(",");
+ msg.AppendAddress(reinterpret_cast<Address>(map));
+ msg.Append(",");
+ if (key->IsSmi()) {
+ msg.Append("%d", Smi::cast(key)->value());
+ } else if (key->IsNumber()) {
+ msg.Append("%lf", key->Number());
+ } else if (key->IsString()) {
+ msg.AppendDetailed(String::cast(key), false);
+ } else if (key->IsSymbol()) {
+ msg.AppendSymbolName(Symbol::cast(key));
+ }
+ msg.Append(",%s,", modifier);
+ if (slow_stub_reason != nullptr) {
+ msg.AppendDoubleQuotedString(slow_stub_reason);
+ }
+ msg.WriteToLogFile();
+}
+
+void Logger::CompareIC(const Address pc, int line, int column, Code* stub,
+ const char* op, const char* old_left,
+ const char* old_right, const char* old_state,
+ const char* new_left, const char* new_right,
+ const char* new_state) {
+ if (!log_->IsEnabled() || !FLAG_trace_ic) return;
+ Log::MessageBuilder msg(log_);
+ msg.Append("CompareIC,");
+ msg.AppendAddress(pc);
+ msg.Append(",%d,%d,", line, column);
+ msg.AppendAddress(reinterpret_cast<Address>(stub));
+ msg.Append(",%s,%s,%s,%s,%s,%s,%s", op, old_left, old_right, old_state,
+ new_left, new_right, new_state);
+ msg.WriteToLogFile();
+}
+
+void Logger::BinaryOpIC(const Address pc, int line, int column, Code* stub,
+ const char* old_state, const char* new_state,
+ AllocationSite* allocation_site) {
+ if (!log_->IsEnabled() || !FLAG_trace_ic) return;
+ Log::MessageBuilder msg(log_);
+ msg.Append("BinaryOpIC,");
+ msg.AppendAddress(pc);
+ msg.Append(",%d,%d,", line, column);
+ msg.AppendAddress(reinterpret_cast<Address>(stub));
+ msg.Append(",%s,%s,", old_state, new_state);
+ if (allocation_site != nullptr) {
+ msg.AppendAddress(reinterpret_cast<Address>(allocation_site));
+ }
+ msg.WriteToLogFile();
+}
+
+void Logger::ToBooleanIC(const Address pc, int line, int column, Code* stub,
+ const char* old_state, const char* new_state) {
+ if (!log_->IsEnabled() || !FLAG_trace_ic) return;
+ Log::MessageBuilder msg(log_);
+ msg.Append("ToBooleanIC,");
+ msg.AppendAddress(pc);
+ msg.Append(",%d,%d,", line, column);
+ msg.AppendAddress(reinterpret_cast<Address>(stub));
+ msg.Append(",%s,%s,", old_state, new_state);
+ msg.WriteToLogFile();
+}
+
+void Logger::PatchIC(const Address pc, const Address test, int delta) {
+ if (!log_->IsEnabled() || !FLAG_trace_ic) return;
+ Log::MessageBuilder msg(log_);
+ msg.Append("PatchIC,");
+ msg.AppendAddress(pc);
+ msg.Append(",");
+ msg.AppendAddress(test);
+ msg.Append(",");
+ msg.Append("%d,", delta);
+ msg.WriteToLogFile();
+}
void Logger::StopProfiler() {
if (!log_->IsEnabled()) return;
@@ -1458,10 +1523,6 @@ void Logger::LogCodeObject(Object* object) {
description = "A load global IC from the snapshot";
tag = Logger::LOAD_GLOBAL_IC_TAG;
break;
- case AbstractCode::CALL_IC:
- description = "A call IC from the snapshot";
- tag = CodeEventListener::CALL_IC_TAG;
- break;
case AbstractCode::STORE_IC:
description = "A store IC from the snapshot";
tag = CodeEventListener::STORE_IC_TAG;
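
[Editor's note] For reference, the Append calls in Logger::ICEvent above produce comma-separated lines of the following shape (schematic with placeholders, not a real log excerpt; the "Keyed" prefix and the quoted slow-stub reason appear only when present):

    [Keyed]<type>,<pc>,<line>,<column>,<old_state>,<new_state>,<map>,<key>,<modifier>,"<slow_stub_reason>"

These lines are emitted only under --trace-ic, which is why the log-utils.h hunk above now also opens the log file when FLAG_trace_ic is set.
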
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index b7a5fc6bd3..6fcb257cc9 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -136,12 +136,6 @@ class Logger : public CodeEventListener {
// object.
void SuspectReadEvent(Name* name, Object* obj);
- // Emits an event when a message is put on or read from a debugging queue.
- // DebugTag lets us put a call-site specific label on the event.
- void DebugTag(const char* call_site_tag);
- void DebugEvent(const char* event_type, Vector<uint16_t> parameter);
-
-
// ==== Events logged by --log-api. ====
void ApiSecurityCheck();
void ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name);
@@ -189,6 +183,21 @@ class Logger : public CodeEventListener {
void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
+ void ICEvent(const char* type, bool keyed, const Address pc, int line,
+ int column, Map* map, Object* key, char old_state,
+ char new_state, const char* modifier,
+ const char* slow_stub_reason);
+ void CompareIC(const Address pc, int line, int column, Code* stub,
+ const char* op, const char* old_left, const char* old_right,
+ const char* old_state, const char* new_left,
+ const char* new_right, const char* new_state);
+ void BinaryOpIC(const Address pc, int line, int column, Code* stub,
+ const char* old_state, const char* new_state,
+ AllocationSite* allocation_site);
+ void ToBooleanIC(const Address pc, int line, int column, Code* stub,
+ const char* old_state, const char* new_state);
+ void PatchIC(const Address pc, const Address test, int delta);
+
// ==== Events logged by --log-gc. ====
// Heap sampling events: start, end, and individual types.
void HeapSampleBeginEvent(const char* space, const char* kind);
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 1075f90e16..6f50b245dc 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -191,9 +191,6 @@ void LookupIterator::InternalUpdateProtector() {
} else if (*name_ == heap()->is_concat_spreadable_symbol()) {
if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
isolate_->InvalidateIsConcatSpreadableProtector();
- } else if (*name_ == heap()->has_instance_symbol()) {
- if (!isolate_->IsHasInstanceLookupChainIntact()) return;
- isolate_->InvalidateHasInstanceProtector();
} else if (*name_ == heap()->iterator_symbol()) {
if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
if (holder_->IsJSArray()) {
@@ -237,9 +234,21 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
}
if (!holder->HasFastProperties()) return;
+ PropertyConstness new_constness = kConst;
+ if (FLAG_track_constant_fields) {
+ if (constness() == kConst) {
+ DCHECK_EQ(kData, property_details_.kind());
+ // Check that the current value matches the new value; otherwise the
+ // property must be made mutable.
+ if (!IsConstFieldValueEqualTo(*value)) new_constness = kMutable;
+ }
+ } else {
+ new_constness = kMutable;
+ }
+
Handle<Map> old_map(holder->map(), isolate_);
- Handle<Map> new_map =
- Map::PrepareForDataProperty(old_map, descriptor_number(), value);
+ Handle<Map> new_map = Map::PrepareForDataProperty(
+ old_map, descriptor_number(), new_constness, value);
if (old_map.is_identical_to(new_map)) {
// Update the property details if the representation was None.
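
[Editor's note] The constness computation added in this hunk follows one rule: with field-constness tracking enabled, a kConst field survives a store only if the stored value is unchanged; with tracking disabled, everything is treated as mutable. A hedged standalone sketch of that decision (names are hypothetical, the actual generalization happens later in Map code):

    enum PropertyConstness { kConst, kMutable };

    PropertyConstness RequestedConstness(bool track_constant_fields,
                                         PropertyConstness current,
                                         bool value_equals_stored) {
      if (!track_constant_fields) return kMutable;
      // Only an existing kConst field is demoted here; Map code performs
      // any further generalization.
      if (current == kConst && !value_equals_stored) return kMutable;
      return kConst;
    }
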
@@ -271,7 +280,10 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
Handle<Map> old_map(holder->map(), isolate_);
Handle<Map> new_map = Map::ReconfigureExistingProperty(
old_map, descriptor_number(), i::kData, attributes);
- new_map = Map::PrepareForDataProperty(new_map, descriptor_number(), value);
+ // Force mutable to avoid changing constant value by reconfiguring
+ // kData -> kAccessor -> kData.
+ new_map = Map::PrepareForDataProperty(new_map, descriptor_number(),
+ kMutable, value);
JSObject::MigrateToMap(holder, new_map);
ReloadPropertyInformation<false>();
} else {
@@ -296,7 +308,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
state_ = DATA;
}
- WriteDataValue(value);
+ WriteDataValue(value, true);
#if VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -360,8 +372,8 @@ void LookupIterator::PrepareTransitionToDataProperty(
return;
}
- Handle<Map> transition =
- Map::TransitionToDataProperty(map, name_, value, attributes, store_mode);
+ Handle<Map> transition = Map::TransitionToDataProperty(
+ map, name_, value, attributes, kDefaultFieldConstness, store_mode);
state_ = TRANSITION;
transition_ = transition;
@@ -603,6 +615,39 @@ Handle<Object> LookupIterator::FetchValue() const {
return handle(result, isolate_);
}
+bool LookupIterator::IsConstFieldValueEqualTo(Object* value) const {
+ DCHECK(!IsElement());
+ DCHECK(holder_->HasFastProperties());
+ DCHECK_EQ(kField, property_details_.location());
+ DCHECK_EQ(kConst, property_details_.constness());
+ Handle<JSObject> holder = GetHolder<JSObject>();
+ FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
+ if (property_details_.representation().IsDouble()) {
+ if (!value->IsNumber()) return false;
+ uint64_t bits;
+ if (holder->IsUnboxedDoubleField(field_index)) {
+ bits = holder->RawFastDoublePropertyAsBitsAt(field_index);
+ } else {
+ Object* current_value = holder->RawFastPropertyAt(field_index);
+ DCHECK(current_value->IsMutableHeapNumber());
+ bits = HeapNumber::cast(current_value)->value_as_bits();
+ }
+ // Use the bit representation of the double to check for the hole double,
+ // since manipulating the signaling NaN used for the hole in C++, e.g. with
+ // bit_cast or value(), will change its value on ia32 (the x87 stack is
+ // used to return values and stores to the stack silently clear the
+ // signaling bit).
+ if (bits == kHoleNanInt64) {
+ // Uninitialized double field.
+ return true;
+ }
+ return bit_cast<double>(bits) == value->Number();
+ } else {
+ Object* current_value = holder->RawFastPropertyAt(field_index);
+ return current_value->IsUninitialized(isolate()) || current_value == value;
+ }
+}
+
int LookupIterator::GetFieldDescriptorIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
@@ -625,10 +670,19 @@ int LookupIterator::GetConstantIndex() const {
DCHECK(holder_->HasFastProperties());
DCHECK_EQ(kDescriptor, property_details_.location());
DCHECK_EQ(kData, property_details_.kind());
+ DCHECK(!FLAG_track_constant_fields);
DCHECK(!IsElement());
return descriptor_number();
}
+Handle<Map> LookupIterator::GetFieldOwnerMap() const {
+ DCHECK(has_property_);
+ DCHECK(holder_->HasFastProperties());
+ DCHECK_EQ(kField, property_details_.location());
+ DCHECK(!IsElement());
+ Map* holder_map = holder_->map();
+ return handle(holder_map->FindFieldOwner(descriptor_number()), isolate_);
+}
FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
@@ -673,8 +727,8 @@ Handle<Object> LookupIterator::GetDataValue() const {
return value;
}
-
-void LookupIterator::WriteDataValue(Handle<Object> value) {
+void LookupIterator::WriteDataValue(Handle<Object> value,
+ bool initializing_store) {
DCHECK_EQ(DATA, state_);
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
if (IsElement()) {
@@ -683,10 +737,16 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
accessor->Set(object, number_, *value);
} else if (holder->HasFastProperties()) {
if (property_details_.location() == kField) {
+ // Check that, in the case of a kConst field, the existing value is equal
+ // to |value|.
+ DCHECK_IMPLIES(
+ !initializing_store && property_details_.constness() == kConst,
+ IsConstFieldValueEqualTo(*value));
JSObject::cast(*holder)->WriteToField(descriptor_number(),
property_details_, *value);
} else {
DCHECK_EQ(kDescriptor, property_details_.location());
+ DCHECK_EQ(kConst, property_details_.constness());
}
} else if (holder->IsJSGlobalObject()) {
GlobalDictionary* dictionary = JSObject::cast(*holder)->global_dictionary();
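
[Editor's note] The bit-level comparison in IsConstFieldValueEqualTo can be summarized in a few lines. A hedged sketch follows; DoubleFieldEquals is a hypothetical helper, and the hole bit pattern is passed in rather than hard-coded because the real constant is defined in V8's headers:

    #include <cstdint>
    #include <cstring>

    // Returns true if a stored double field equals |new_value|, treating the
    // hole pattern as "uninitialized, anything matches". Comparing raw bits
    // first avoids loading the signaling NaN through the x87 FPU on ia32.
    bool DoubleFieldEquals(uint64_t stored_bits, uint64_t hole_bits,
                           double new_value) {
      if (stored_bits == hole_bits) return true;  // uninitialized double field
      double stored;
      std::memcpy(&stored, &stored_bits, sizeof stored);
      return stored == new_value;
    }
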
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 5f7a293e36..190a75edb3 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -236,6 +236,9 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
Representation representation() const {
return property_details().representation();
}
+ PropertyLocation location() const { return property_details().location(); }
+ PropertyConstness constness() const { return property_details().constness(); }
+ Handle<Map> GetFieldOwnerMap() const;
FieldIndex GetFieldIndex() const;
Handle<FieldType> GetFieldType() const;
int GetFieldDescriptorIndex() const;
@@ -252,7 +255,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
}
Handle<InterceptorInfo> GetInterceptorForFailedAccessCheck() const;
Handle<Object> GetDataValue() const;
- void WriteDataValue(Handle<Object> value);
+ void WriteDataValue(Handle<Object> value, bool initializing_store);
inline void UpdateProtector() {
if (IsElement()) return;
if (*name_ == heap()->is_concat_spreadable_symbol() ||
@@ -303,6 +306,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
template <bool is_element>
void RestartInternal(InterceptorState interceptor_state);
Handle<Object> FetchValue() const;
+ bool IsConstFieldValueEqualTo(Object* value) const;
template <bool is_element>
void ReloadPropertyInformation();
diff --git a/deps/v8/src/machine-type.cc b/deps/v8/src/machine-type.cc
index 9289673bd7..ba555dd36a 100644
--- a/deps/v8/src/machine-type.cc
+++ b/deps/v8/src/machine-type.cc
@@ -32,6 +32,12 @@ const char* MachineReprToString(MachineRepresentation rep) {
return "kRepFloat64";
case MachineRepresentation::kSimd128:
return "kRepSimd128";
+ case MachineRepresentation::kSimd1x4:
+ return "kRepSimd1x4";
+ case MachineRepresentation::kSimd1x8:
+ return "kRepSimd1x8";
+ case MachineRepresentation::kSimd1x16:
+ return "kRepSimd1x16";
case MachineRepresentation::kTaggedSigned:
return "kRepTaggedSigned";
case MachineRepresentation::kTaggedPointer:
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index a59aced36e..1f87cf297b 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-enum class MachineRepresentation : uint8_t {
+enum class MachineRepresentation {
kNone,
kBit,
kWord8,
@@ -29,8 +29,11 @@ enum class MachineRepresentation : uint8_t {
kFloat32,
kFloat64,
kSimd128,
+ kSimd1x4, // SIMD boolean vector types.
+ kSimd1x8,
+ kSimd1x16,
kFirstFPRepresentation = kFloat32,
- kLastRepresentation = kSimd128
+ kLastRepresentation = kSimd1x16
};
static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
@@ -39,7 +42,7 @@ static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
const char* MachineReprToString(MachineRepresentation);
-enum class MachineSemantic : uint8_t {
+enum class MachineSemantic {
kNone,
kBool,
kInt32,
@@ -127,6 +130,16 @@ class MachineType {
static MachineType Simd128() {
return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
}
+ static MachineType Simd1x4() {
+ return MachineType(MachineRepresentation::kSimd1x4, MachineSemantic::kNone);
+ }
+ static MachineType Simd1x8() {
+ return MachineType(MachineRepresentation::kSimd1x8, MachineSemantic::kNone);
+ }
+ static MachineType Simd1x16() {
+ return MachineType(MachineRepresentation::kSimd1x16,
+ MachineSemantic::kNone);
+ }
static MachineType Pointer() {
return MachineType(PointerRepresentation(), MachineSemantic::kNone);
}
@@ -173,6 +186,16 @@ class MachineType {
static MachineType RepSimd128() {
return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
}
+ static MachineType RepSimd1x4() {
+ return MachineType(MachineRepresentation::kSimd1x4, MachineSemantic::kNone);
+ }
+ static MachineType RepSimd1x8() {
+ return MachineType(MachineRepresentation::kSimd1x8, MachineSemantic::kNone);
+ }
+ static MachineType RepSimd1x16() {
+ return MachineType(MachineRepresentation::kSimd1x16,
+ MachineSemantic::kNone);
+ }
static MachineType RepTagged() {
return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
}
@@ -201,6 +224,12 @@ class MachineType {
return MachineType::Float64();
case MachineRepresentation::kSimd128:
return MachineType::Simd128();
+ case MachineRepresentation::kSimd1x4:
+ return MachineType::Simd1x4();
+ case MachineRepresentation::kSimd1x8:
+ return MachineType::Simd1x8();
+ case MachineRepresentation::kSimd1x16:
+ return MachineType::Simd1x16();
case MachineRepresentation::kTagged:
return MachineType::AnyTagged();
case MachineRepresentation::kTaggedSigned:
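
[Editor's note] A small illustration of the additions above, assuming only the declarations shown in this patch: the new boolean-vector representations round-trip through TypeForRepresentation and print via MachineReprToString.

    MachineType t =
        MachineType::TypeForRepresentation(MachineRepresentation::kSimd1x8);
    // t == MachineType::Simd1x8(); its representation prints as "kRepSimd1x8".
    const char* name = MachineReprToString(t.representation());
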
diff --git a/deps/v8/src/managed.h b/deps/v8/src/managed.h
new file mode 100644
index 0000000000..b738ec567b
--- /dev/null
+++ b/deps/v8/src/managed.h
@@ -0,0 +1,81 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MANAGED_H_
+#define V8_WASM_MANAGED_H_
+
+#include "src/factory.h"
+#include "src/global-handles.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+// An object that wraps a pointer to a C++ object and manages its lifetime.
+// The C++ object is deleted when the managed wrapper object is garbage
+// collected, or, as a last resort, when the isolate is torn down before GC,
+// as part of Isolate::Dispose().
+// Managed<CppType> may be used polymorphically as Foreign, where the held
+// address is typed as CppType**. The double indirection is due to Managed's
+// use of Isolate::ManagedObjectFinalizer, which has a CppType* as its
+// first field.
+template <class CppType>
+class Managed : public Foreign {
+ public:
+ V8_INLINE CppType* get() {
+ return *(reinterpret_cast<CppType**>(foreign_address()));
+ }
+
+ static Managed<CppType>* cast(Object* obj) {
+ SLOW_DCHECK(obj->IsForeign());
+ return reinterpret_cast<Managed<CppType>*>(obj);
+ }
+
+ static Handle<Managed<CppType>> New(Isolate* isolate, CppType* ptr) {
+ Isolate::ManagedObjectFinalizer* node =
+ isolate->RegisterForReleaseAtTeardown(ptr,
+ Managed<CppType>::NativeDelete);
+ Handle<Managed<CppType>> handle = Handle<Managed<CppType>>::cast(
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(node)));
+ RegisterWeakCallbackForDelete(isolate, handle);
+ return handle;
+ }
+
+ private:
+ static void RegisterWeakCallbackForDelete(Isolate* isolate,
+ Handle<Managed<CppType>> handle) {
+ Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
+ GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+ &Managed<CppType>::GCDelete,
+ v8::WeakCallbackType::kFinalizer);
+ }
+
+ static void GCDelete(const v8::WeakCallbackInfo<void>& data) {
+ Managed<CppType>** p =
+ reinterpret_cast<Managed<CppType>**>(data.GetParameter());
+
+ Isolate::ManagedObjectFinalizer* finalizer = (*p)->GetFinalizer();
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+ finalizer->Dispose();
+ isolate->UnregisterFromReleaseAtTeardown(&finalizer);
+
+ (*p)->set_foreign_address(static_cast<Address>(nullptr));
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+ }
+
+ static void NativeDelete(void* value) {
+ CppType* typed_value = reinterpret_cast<CppType*>(value);
+ delete typed_value;
+ }
+
+ Isolate::ManagedObjectFinalizer* GetFinalizer() {
+ return reinterpret_cast<Isolate::ManagedObjectFinalizer*>(
+ foreign_address());
+ }
+};
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MANAGED_H_
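
[Editor's note] A hypothetical usage sketch of the new wrapper (NativeThing is invented for illustration, and an Isolate* is assumed to be in scope): the wrapped pointer is deleted either by the weak-callback path or at isolate teardown, whichever comes first.

    class NativeThing { /* arbitrary C++ state */ };

    Handle<Managed<NativeThing>> wrapper =
        Managed<NativeThing>::New(isolate, new NativeThing());
    NativeThing* thing = wrapper->get();  // resolves the double indirection
    // No explicit delete: GCDelete or Isolate::Dispose() frees the object.
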
diff --git a/deps/v8/src/map-updater.cc b/deps/v8/src/map-updater.cc
index a9b9a60f6d..f82c2cf033 100644
--- a/deps/v8/src/map-updater.cc
+++ b/deps/v8/src/map-updater.cc
@@ -22,10 +22,6 @@ inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
return false;
}
-inline bool LocationFitsInto(PropertyLocation what, PropertyLocation where) {
- return where == kField || what == kDescriptor;
-}
-
} // namespace
Name* MapUpdater::GetKey(int descriptor) const {
@@ -36,7 +32,7 @@ PropertyDetails MapUpdater::GetDetails(int descriptor) const {
DCHECK_LE(0, descriptor);
if (descriptor == modified_descriptor_) {
return PropertyDetails(new_kind_, new_attributes_, new_location_,
- new_representation_);
+ new_constness_, new_representation_);
}
return old_descriptors_->GetDetails(descriptor);
}
@@ -89,6 +85,7 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType(
Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
PropertyAttributes attributes,
+ PropertyConstness constness,
Representation representation,
Handle<FieldType> field_type) {
DCHECK_EQ(kInitialized, state_);
@@ -98,8 +95,6 @@ Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
new_kind_ = kData;
new_attributes_ = attributes;
new_location_ = kField;
- new_representation_ = representation;
- new_field_type_ = field_type;
PropertyDetails old_details =
old_descriptors_->GetDetails(modified_descriptor_);
@@ -107,16 +102,25 @@ Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
// If property kind is not reconfigured merge the result with
// representation/field type from the old descriptor.
if (old_details.kind() == new_kind_) {
+ new_constness_ = GeneralizeConstness(constness, old_details.constness());
+
Representation old_representation = old_details.representation();
- new_representation_ = new_representation_.generalize(old_representation);
+ new_representation_ = representation.generalize(old_representation);
Handle<FieldType> old_field_type =
GetOrComputeFieldType(old_descriptors_, modified_descriptor_,
old_details.location(), new_representation_);
- new_field_type_ = Map::GeneralizeFieldType(
- old_representation, old_field_type, new_representation_,
- new_field_type_, isolate_);
+ new_field_type_ =
+ Map::GeneralizeFieldType(old_representation, old_field_type,
+ new_representation_, field_type, isolate_);
+ } else {
+ // We don't know if this is the first reconfiguration of the property's
+ // kind, and we don't know which value the property held previously, so
+ // we can't treat it as constant.
+ new_constness_ = kMutable;
+ new_representation_ = representation;
+ new_field_type_ = field_type;
}
if (TryRecofigureToDataFieldInplace() == kEnd) return result_map_;
@@ -149,6 +153,16 @@ Handle<Map> MapUpdater::Update() {
return result_map_;
}
+void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ Handle<FieldType> new_field_type) {
+ Map::GeneralizeField(map, modify_index, new_constness, new_representation,
+ new_field_type);
+
+ DCHECK_EQ(*old_descriptors_, old_map_->instance_descriptors());
+}
+
MapUpdater::State MapUpdater::CopyGeneralizeAllFields(const char* reason) {
result_map_ = Map::CopyGeneralizeAllFields(old_map_, new_elements_kind_,
modified_descriptor_, new_kind_,
@@ -187,8 +201,8 @@ MapUpdater::State MapUpdater::TryRecofigureToDataFieldInplace() {
Handle<Map> field_owner(old_map_->FindFieldOwner(modified_descriptor_),
isolate_);
- Map::GeneralizeField(field_owner, modified_descriptor_, new_representation_,
- new_field_type_);
+ GeneralizeField(field_owner, modified_descriptor_, new_constness_,
+ new_representation_, new_field_type_);
// Check that the descriptor array was updated.
DCHECK(old_descriptors_->GetDetails(modified_descriptor_)
.representation()
@@ -228,19 +242,23 @@ MapUpdater::State MapUpdater::FindRootMap() {
old_details.attributes() != new_attributes_) {
return CopyGeneralizeAllFields("GenAll_RootModification1");
}
- if (!new_representation_.fits_into(old_details.representation())) {
+ if (old_details.location() != kField) {
return CopyGeneralizeAllFields("GenAll_RootModification2");
}
- if (old_details.location() != kField) {
+ if (new_constness_ != old_details.constness()) {
return CopyGeneralizeAllFields("GenAll_RootModification3");
}
+ if (!new_representation_.fits_into(old_details.representation())) {
+ return CopyGeneralizeAllFields("GenAll_RootModification4");
+ }
+
DCHECK_EQ(kData, old_details.kind());
DCHECK_EQ(kData, new_kind_);
DCHECK_EQ(kField, new_location_);
FieldType* old_field_type =
old_descriptors_->GetFieldType(modified_descriptor_);
if (!new_field_type_->NowIs(old_field_type)) {
- return CopyGeneralizeAllFields("GenAll_RootModification4");
+ return CopyGeneralizeAllFields("GenAll_RootModification5");
}
}
@@ -276,12 +294,13 @@ MapUpdater::State MapUpdater::FindTargetMap() {
// TODO(ishell): mutable accessors are not implemented yet.
return CopyGeneralizeAllFields("GenAll_Incompatible");
}
- // Check if old location fits into tmp location.
- if (!LocationFitsInto(old_details.location(), tmp_details.location())) {
+ PropertyConstness tmp_constness = tmp_details.constness();
+ if (!IsGeneralizableTo(old_details.constness(), tmp_constness)) {
+ break;
+ }
+ if (!IsGeneralizableTo(old_details.location(), tmp_details.location())) {
break;
}
-
- // Check if old representation fits into tmp representation.
Representation tmp_representation = tmp_details.representation();
if (!old_details.representation().fits_into(tmp_representation)) {
break;
@@ -290,7 +309,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (tmp_details.location() == kField) {
Handle<FieldType> old_field_type =
GetOrComputeFieldType(i, old_details.location(), tmp_representation);
- Map::GeneralizeField(tmp_map, i, tmp_representation, old_field_type);
+ GeneralizeField(tmp_map, i, tmp_constness, tmp_representation,
+ old_field_type);
} else {
// kDescriptor: Check that the value matches.
if (!EqualImmutableValues(GetValue(i), tmp_descriptors->GetValue(i))) {
@@ -311,6 +331,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
target_descriptors->GetDetails(modified_descriptor_);
DCHECK_EQ(new_kind_, details.kind());
DCHECK_EQ(new_attributes_, details.attributes());
+ DCHECK(IsGeneralizableTo(new_constness_, details.constness()));
DCHECK_EQ(new_location_, details.location());
DCHECK(new_representation_.fits_into(details.representation()));
if (new_location_ == kField) {
@@ -341,7 +362,6 @@ MapUpdater::State MapUpdater::FindTargetMap() {
Handle<Map> tmp_map(transition, isolate_);
Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
isolate_);
-
#ifdef DEBUG
// Check that target map is compatible.
PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
@@ -404,6 +424,14 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
PropertyKind next_kind = old_details.kind();
PropertyAttributes next_attributes = old_details.attributes();
+ DCHECK_EQ(next_kind, target_details.kind());
+ DCHECK_EQ(next_attributes, target_details.attributes());
+
+ PropertyConstness next_constness = GeneralizeConstness(
+ old_details.constness(), target_details.constness());
+
+ // Note: a failed value equality check does not invalidate per-object
+ // property constness.
PropertyLocation next_location =
old_details.location() == kField ||
target_details.location() == kField ||
@@ -412,13 +440,16 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
? kField
: kDescriptor;
+ if (!FLAG_track_constant_fields && next_location == kField) {
+ next_constness = kMutable;
+ }
+ // Ensure that mutable values are stored in fields.
+ DCHECK_IMPLIES(next_constness == kMutable, next_location == kField);
+
Representation next_representation =
old_details.representation().generalize(
target_details.representation());
- DCHECK_EQ(next_kind, target_details.kind());
- DCHECK_EQ(next_attributes, target_details.attributes());
-
if (next_location == kField) {
Handle<FieldType> old_field_type =
GetOrComputeFieldType(i, old_details.location(), next_representation);
@@ -434,8 +465,9 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
Handle<Object> wrapped_type(Map::WrapFieldType(next_field_type));
Descriptor d;
if (next_kind == kData) {
- d = Descriptor::DataField(key, current_offset, wrapped_type,
- next_attributes, next_representation);
+ d = Descriptor::DataField(key, current_offset, next_attributes,
+ next_constness, next_representation,
+ wrapped_type);
} else {
// TODO(ishell): mutable accessors are not implemented yet.
UNIMPLEMENTED();
@@ -444,10 +476,12 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
new_descriptors->Set(i, &d);
} else {
DCHECK_EQ(kDescriptor, next_location);
+ DCHECK_EQ(kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
Descriptor d;
if (next_kind == kData) {
+ DCHECK(!FLAG_track_constant_fields);
d = Descriptor::DataConstant(key, value, next_attributes);
} else {
DCHECK_EQ(kAccessor, next_kind);
@@ -465,6 +499,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
PropertyKind next_kind = old_details.kind();
PropertyAttributes next_attributes = old_details.attributes();
+ PropertyConstness next_constness = old_details.constness();
PropertyLocation next_location = old_details.location();
Representation next_representation = old_details.representation();
@@ -476,8 +511,10 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
Handle<Object> wrapped_type(Map::WrapFieldType(old_field_type));
Descriptor d;
if (next_kind == kData) {
- d = Descriptor::DataField(key, current_offset, wrapped_type,
- next_attributes, next_representation);
+ DCHECK_IMPLIES(!FLAG_track_constant_fields, next_constness == kMutable);
+ d = Descriptor::DataField(key, current_offset, next_attributes,
+ next_constness, next_representation,
+ wrapped_type);
} else {
// TODO(ishell): mutable accessors are not implemented yet.
UNIMPLEMENTED();
@@ -486,6 +523,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
new_descriptors->Set(i, &d);
} else {
DCHECK_EQ(kDescriptor, next_location);
+ DCHECK_EQ(kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
if (next_kind == kData) {
@@ -518,6 +556,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
PropertyDetails next_details = next_descriptors->GetDetails(i);
DCHECK_EQ(details.kind(), next_details.kind());
DCHECK_EQ(details.attributes(), next_details.attributes());
+ if (details.constness() != next_details.constness()) break;
if (details.location() != next_details.location()) break;
if (!details.representation().Equals(next_details.representation())) break;
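
[Editor's note] The GeneralizeConstness and IsGeneralizableTo helpers used throughout this file implement a two-point lattice: kConst may generalize to kMutable, never the reverse. A minimal sketch of that lattice, assuming the two-value enum used in this patch:

    enum PropertyConstness { kConst, kMutable };

    PropertyConstness GeneralizeConstness(PropertyConstness a,
                                          PropertyConstness b) {
      return (a == kMutable || b == kMutable) ? kMutable : kConst;
    }

    // True if |from| can flow to |to| without losing information.
    bool IsGeneralizableTo(PropertyConstness from, PropertyConstness to) {
      return to == kMutable || from == kConst;
    }
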
diff --git a/deps/v8/src/map-updater.h b/deps/v8/src/map-updater.h
index 68b720365b..338914448f 100644
--- a/deps/v8/src/map-updater.h
+++ b/deps/v8/src/map-updater.h
@@ -55,6 +55,7 @@ class MapUpdater {
// performs the steps 1-5.
Handle<Map> ReconfigureToDataField(int descriptor,
PropertyAttributes attributes,
+ PropertyConstness constness,
Representation representation,
Handle<FieldType> field_type);
@@ -141,6 +142,11 @@ class MapUpdater {
Handle<DescriptorArray> descriptors, int descriptor,
PropertyLocation location, Representation representation);
+ void GeneralizeField(Handle<Map> map, int modify_index,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ Handle<FieldType> new_field_type);
+
Isolate* isolate_;
Handle<Map> old_map_;
Handle<DescriptorArray> old_descriptors_;
@@ -157,6 +163,7 @@ class MapUpdater {
int modified_descriptor_ = -1;
PropertyKind new_kind_ = kData;
PropertyAttributes new_attributes_ = NONE;
+ PropertyConstness new_constness_ = kMutable;
PropertyLocation new_location_ = kField;
Representation new_representation_ = Representation::None();
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index dc8c9d05df..bb595c2f8a 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -269,6 +269,7 @@ class ErrorUtils : public AllStatic {
"ArrayBuffer subclass returned this from species constructor") \
T(ArrayFunctionsOnFrozen, "Cannot modify frozen array elements") \
T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements") \
+ T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \
T(CalledNonCallable, "% is not a function") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
@@ -295,7 +296,7 @@ class ErrorUtils : public AllStatic {
T(DebuggerFrame, "Debugger: Invalid frame index.") \
T(DebuggerType, "Debugger: Parameters have wrong types.") \
T(DeclarationMissingInitializer, "Missing initializer in % declaration") \
- T(DefineDisallowed, "Cannot define property:%, object is not extensible.") \
+ T(DefineDisallowed, "Cannot define property %, object is not extensible") \
T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer") \
T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
T(ExtendsValueNotConstructor, \
@@ -307,6 +308,7 @@ class ErrorUtils : public AllStatic {
T(IllegalInvocation, "Illegal invocation") \
T(ImmutablePrototypeSet, \
"Immutable prototype object '%' cannot have their prototype set") \
+ T(ImportCallNotNewExpression, "Cannot use new with import") \
T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
T(InstanceofNonobjectProto, \
"Function has non-object prototype '%' in instanceof check") \
@@ -314,7 +316,6 @@ class ErrorUtils : public AllStatic {
T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %") \
T(InvalidRegExpExecResult, \
"RegExp exec method returned something other than an Object or null") \
- T(InvalidSimdOperation, "% is not a valid type for this SIMD operation.") \
T(IteratorResultNotAnObject, "Iterator result % is not an object") \
T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
T(LanguageID, "Language ID should be string or object.") \
@@ -350,7 +351,7 @@ class ErrorUtils : public AllStatic {
T(ObjectGetterExpectingFunction, \
"Object.prototype.__defineGetter__: Expecting function") \
T(ObjectGetterCallable, "Getter must be a function: %") \
- T(ObjectNotExtensible, "Can't add property %, object is not extensible") \
+ T(ObjectNotExtensible, "Cannot add property %, object is not extensible") \
T(ObjectSetterExpectingFunction, \
"Object.prototype.__defineSetter__: Expecting function") \
T(ObjectSetterCallable, "Setter must be a function: %") \
@@ -474,10 +475,11 @@ class ErrorUtils : public AllStatic {
T(StrictCannotCreateProperty, "Cannot create property '%' on % '%'") \
T(SymbolIteratorInvalid, \
"Result of the Symbol.iterator method is not an object") \
+ T(SymbolAsyncIteratorInvalid, \
+ "Result of the Symbol.asyncIterator method is not an object") \
T(SymbolKeyFor, "% is not a symbol") \
T(SymbolToNumber, "Cannot convert a Symbol value to a number") \
T(SymbolToString, "Cannot convert a Symbol value to a string") \
- T(SimdToNumber, "Cannot convert a SIMD value to a number") \
T(ThrowMethodMissing, "The iterator does not provide a 'throw' method.") \
T(UndefinedOrNullToObject, "Cannot convert undefined or null to object") \
T(ValueAndAccessor, \
@@ -506,8 +508,7 @@ class ErrorUtils : public AllStatic {
T(InvalidDataViewAccessorOffset, \
"Offset is outside the bounds of the DataView") \
T(InvalidDataViewLength, "Invalid DataView length %") \
- T(InvalidDataViewOffset, \
- "Start offset % is outside the bounds of the buffer") \
+ T(InvalidOffset, "Start offset % is outside the bounds of the buffer") \
T(InvalidHint, "Invalid hint: %") \
T(InvalidLanguageTag, "Invalid language tag: %") \
T(InvalidWeakMapKey, "Invalid value used as weak map key") \
@@ -517,8 +518,6 @@ class ErrorUtils : public AllStatic {
T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
T(InvalidTypedArrayIndex, "Invalid typed array index") \
T(InvalidTypedArrayLength, "Invalid typed array length") \
- T(InvalidSimdIndex, "Index out of bounds for SIMD operation") \
- T(InvalidSimdLaneValue, "Lane value out of bounds for SIMD operation") \
T(LetInLexicalBinding, "let is disallowed as a lexically bound name") \
T(LocaleMatcher, "Illegal value for localeMatcher:%") \
T(NormalizationForm, "The normalization form should be one of %.") \
@@ -589,11 +588,14 @@ class ErrorUtils : public AllStatic {
T(PushPastSafeLength, \
"Pushing % elements on an array-like of length % " \
"is disallowed, as the total surpasses 2**53-1") \
- T(ElementAfterRest, "Rest element must be last element in array") \
+ T(ElementAfterRest, "Rest element must be last element") \
T(BadSetterRestParameter, \
"Setter function argument must not be a rest parameter") \
T(ParamDupe, "Duplicate parameter name not allowed in this context") \
T(ParenthesisInArgString, "Function arg string contains parenthesis") \
+ T(ArgStringTerminatesParametersEarly, \
+ "Arg string terminates parameters early") \
+ T(UnexpectedEndOfArgString, "Unexpected end of arg string") \
T(RuntimeWrongNumArgs, "Runtime function given wrong number of arguments") \
T(SingleFunctionLiteral, "Single function literal required") \
T(SloppyFunction, \
@@ -641,6 +643,8 @@ class ErrorUtils : public AllStatic {
T(UnexpectedTokenNumber, "Unexpected number") \
T(UnexpectedTokenString, "Unexpected string") \
T(UnexpectedTokenRegExp, "Unexpected regular expression") \
+ T(UnexpectedLexicalDeclaration, \
+ "Lexical declaration cannot appear in a single-statement context") \
T(UnknownLabel, "Undefined label '%'") \
T(UnresolvableExport, \
"The requested module does not provide an export named '%'") \
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 963ed4acc9..b06ec84b9b 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -41,7 +41,7 @@
#include "src/assembler.h"
#include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -137,6 +137,17 @@ int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
+Address Assembler::target_address_at(Address pc, Code* code) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index dec4c18889..9b259bbf83 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -155,6 +155,7 @@ int ToNumber(Register reg);
Register ToRegister(int num);
static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
// Coprocessor register.
struct FPURegister {
@@ -472,17 +473,10 @@ class Assembler : public AssemblerBase {
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
set_target_address_at(isolate, pc, target, icache_flush_mode);
}
- INLINE(static Address target_address_at(Address pc, Code* code)) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- return target_address_at(pc, constant_pool);
- }
+ INLINE(static Address target_address_at(Address pc, Code* code));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(isolate, pc, constant_pool, target,
- icache_flush_mode);
- }
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
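
[Editor's note] The mechanical change above follows a common C++ layering pattern: the class header keeps only the declaration, and the definition moves to the -inl.h header, which is free to include heavier headers such as objects-inl.h for the Code type. A generic sketch of the idiom, with invented names:

    // widget.h -- only a forward declaration of Payload is needed.
    class Payload;
    class Widget {
     public:
      static int Size(Payload* p);  // declared here, defined in widget-inl.h
    };

    // widget-inl.h -- may include the full Payload definition.
    // #include "payload.h"
    inline int Widget::Size(Payload* p) { return p ? 1 : 0; }
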
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 04a0b9f012..e29da4cb70 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -235,8 +235,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
} else {
__ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
@@ -244,8 +242,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -1146,9 +1142,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// We build an EntryFrame.
__ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = type();
- __ li(t2, Operand(Smi::FromInt(marker)));
- __ li(t1, Operand(Smi::FromInt(marker)));
+ StackFrame::Type marker = type();
+ __ li(t2, Operand(StackFrame::TypeToMarker(marker)));
+ __ li(t1, Operand(StackFrame::TypeToMarker(marker)));
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
isolate)));
__ lw(t0, MemOperand(t0));
@@ -1179,12 +1175,12 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ lw(t2, MemOperand(t1));
__ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
__ sw(fp, MemOperand(t1));
- __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ li(t0, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ nop(); // Branch delay slot nop.
__ bind(&non_outermost_js);
- __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ li(t0, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
__ push(t0);
@@ -1251,10 +1247,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(t1);
- __ Branch(&non_outermost_js_2,
- ne,
- t1,
- Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ Branch(&non_outermost_js_2, ne, t1,
+ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ sw(zero_reg, MemOperand(t1));
__ bind(&non_outermost_js_2);
@@ -1277,50 +1271,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
- // Return address is in ra.
- Label miss;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register index = LoadDescriptor::NameRegister();
- Register scratch = t1;
- Register result = v0;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));
-
- StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- RECEIVER_IS_STRING);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver = LoadDescriptor::ReceiverRegister();
- // Ensure that the vector and slot registers won't be clobbered before
- // calling the miss handler.
- DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::VectorRegister(),
- LoadWithVectorDescriptor::SlotRegister()));
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
- t1, &miss);
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -1422,7 +1372,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
- // (8) Sliced string. Replace subject with parent. Go to (1).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
@@ -1443,6 +1393,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
// Go to (5).
@@ -1469,12 +1420,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Branch(&runtime, ls, a3, Operand(a1));
__ sra(a1, a1, kSmiTagSize); // Untag the Smi.
- STATIC_ASSERT(kStringEncodingMask == 4);
- STATIC_ASSERT(kOneByteStringTag == 4);
+ STATIC_ASSERT(kStringEncodingMask == 8);
+ STATIC_ASSERT(kOneByteStringTag == 8);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
__ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
- __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
+ __ sra(a3, a0, 3); // a3 is 1 for ASCII, 0 for UC16 (used below).
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
@@ -1719,12 +1670,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
__ Branch(&runtime, ne, at, Operand(zero_reg));
- // (8) Sliced string. Replace subject with parent. Go to (4).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (4).
+ Label thin_string;
+ __ Branch(&thin_string, eq, a1, Operand(kThinStringTag));
// Load offset into t0 and replace subject string with parent.
__ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ sra(t0, t0, kSmiTagSize);
__ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ jmp(&check_underlying); // Go to (4).
+
+ __ bind(&thin_string);
+ __ lw(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+ __ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
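
[Editor's note] In C++ terms, step (8) above now unwraps two kinds of indirection before retrying the dispatch. A hedged sketch using V8-style accessors (Unwrap is a hypothetical helper, not part of the patch):

    // Replace the subject with its underlying string, as step (8) does:
    // a SlicedString points at a parent plus an offset, while a ThinString
    // points directly at the actual string.
    String* Unwrap(String* subject) {
      if (subject->IsSlicedString())
        return SlicedString::cast(subject)->parent();
      if (subject->IsThinString())
        return ThinString::cast(subject)->actual();
      return subject;  // already a direct string
    }
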
@@ -1886,187 +1843,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
- Register slot) {
- __ Lsa(at, feedback_vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(slot, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
- __ Addu(slot, slot, Operand(Smi::FromInt(1)));
- __ sw(slot, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
- // a0 - number of arguments
- // a1 - function
- // a3 - slot id
- // a2 - vector
- // t0 - loaded from vector[slot]
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
- __ Branch(miss, ne, a1, Operand(at));
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, a2, a3);
-
- __ mov(a2, t0);
- __ mov(a3, a1);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
- // a0 - number of arguments
- // a1 - function
- // a3 - slot id (Smi)
- // a2 - vector
- Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
- // The checks. First, does r1 match the recorded monomorphic target?
- __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
-
- // We don't know that we have a weak cell. We might have a private symbol
- // or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
- // computed, meaning that it can't appear to be a pointer. If the low bit is
- // 0, then hash is computed, but the 0 bit prevents the field from appearing
- // to be a pointer.
- STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
- WeakCell::kValueOffset &&
- WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
- __ lw(t1, FieldMemOperand(t0, WeakCell::kValueOffset));
- __ Branch(&extra_checks_or_miss, ne, a1, Operand(t1));
-
- // The compare above could have been a SMI/SMI comparison. Guard against this
- // convincing us that we have a monomorphic JSFunction.
- __ JumpIfSmi(a1, &extra_checks_or_miss);
-
- __ bind(&call_function);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, a2, a3);
-
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
- tail_call_mode()),
- RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
-
- __ bind(&extra_checks_or_miss);
- Label uninitialized, miss, not_allocation_site;
-
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&call, eq, t0, Operand(at));
-
- // Verify that t0 contains an AllocationSite
- __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&not_allocation_site, ne, t1, Operand(at));
-
- HandleArrayCase(masm, &miss);
-
- __ bind(&not_allocation_site);
-
- // The following cases attempt to handle MISS cases without going to the
- // runtime.
- if (FLAG_trace_ic) {
- __ Branch(&miss);
- }
-
- __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
- __ Branch(&uninitialized, eq, t0, Operand(at));
-
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(t0);
- __ GetObjectType(t0, t1, t1);
- __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
- __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
-
- __ bind(&call);
- IncrementCallCount(masm, a2, a3);
-
- __ bind(&call_count_incremented);
-
- __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
-
- __ bind(&uninitialized);
-
- // We are going monomorphic, provided we actually have a JSFunction.
- __ JumpIfSmi(a1, &miss);
-
- // Goto miss case if we do not have a function.
- __ GetObjectType(a1, t0, t0);
- __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));
-
- // Make sure the function is not the Array() function, which requires special
- // behavior on MISS.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0);
- __ Branch(&miss, eq, a1, Operand(t0));
-
- // Make sure the function belongs to the same native context.
- __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
- __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
- __ lw(t1, NativeContextMemOperand());
- __ Branch(&miss, ne, t0, Operand(t1));
-
- // Store the function. Use a stub since we need a frame for allocation.
- // a2 - vector
- // a3 - slot
- // a1 - function
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateWeakCellStub create_stub(masm->isolate());
- __ SmiTag(a0);
- __ Push(a0);
- __ Push(a2, a3);
- __ Push(cp, a1);
- __ CallStub(&create_stub);
- __ Pop(cp, a1);
- __ Pop(a2, a3);
- __ Pop(a0);
- __ SmiUntag(a0);
- }
-
- __ Branch(&call_function);
-
- // We are here because tracing is on or we encountered a MISS case we can't
- // handle here.
- __ bind(&miss);
- GenerateMiss(masm);
-
- __ Branch(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the number of arguments as Smi.
- __ SmiTag(a0);
- __ Push(a0);
-
- // Push the receiver and the function and feedback info.
- __ Push(a1, a2, a3);
-
- // Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss);
-
- // Move result to a1 and exit the internal frame.
- __ mov(a1, v0);
-
- // Restore number of arguments.
- __ Pop(a0);
- __ SmiUntag(a0);
-}
-
-
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
DCHECK(!t0.is(index_));
@@ -2164,51 +1940,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
-
- DCHECK(!t0.is(result_));
- DCHECK(!t0.is(code_));
-
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
- __ And(t0, code_, Operand(kSmiTagMask |
- ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
- __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged one-byte char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ Lsa(result_, result_, code_, kPointerSizeLog2 - kSmiTagSize);
- __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&slow_case_, eq, result_, Operand(t0));
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode);
- __ Move(result_, v0);
-
- call_helper.AfterCall(masm);
- __ Branch(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3127,12 +2858,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Addu(sp, sp, a1);
}
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadFeedbackVector(a2);
- CallICStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -3487,499 +3212,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- ra : return address
- // -----------------------------------
- __ AssertFunction(a1);
-
- // Make a2 point to the JavaScript frame.
- __ mov(a2, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
- __ Branch(&ok, eq, a1, Operand(a3));
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have rest parameters (only possible if we have an
- // arguments adaptor frame below the function frame).
- Label no_rest_parameters;
- __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&no_rest_parameters, ne, a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Check if the arguments adaptor frame contains more arguments than
- // specified by the function's internal formal parameter count.
- Label rest_parameters;
- __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3,
- FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Subu(a0, a0, Operand(a3));
- __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
-
- // Return an empty rest parameter array.
- __ bind(&no_rest_parameters);
- {
- // ----------- S t a t e -------------
- // -- cp : context
- // -- ra : return address
- // -----------------------------------
-
- // Allocate an empty rest parameter array.
- Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the rest parameter array in v0.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
- __ sw(a1, FieldMemOperand(v0, JSArray::kMapOffset));
- __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
- __ sw(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
- __ Move(a1, Smi::kZero);
- __ Ret(USE_DELAY_SLOT);
- __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); // In delay slot
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(Smi::FromInt(JSArray::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- }
- __ jmp(&done_allocate);
- }
-
- __ bind(&rest_parameters);
- {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
- __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
- __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
- 1 * kPointerSize));
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- a0 : number of rest parameters (tagged)
- // -- a1 : function
-    // -- a2 : pointer to the first rest parameter
- // -- ra : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ li(t0, Operand(JSArray::kSize + FixedArray::kHeaderSize));
- __ Lsa(t0, t0, a0, kPointerSizeLog2 - 1);
- __ Allocate(t0, v0, a3, t1, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
-    // Set up the elements array in v0.
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
- __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
- {
- Label loop, done_loop;
- __ sll(at, a0, kPointerSizeLog2 - 1);
- __ Addu(a1, a3, at);
- __ bind(&loop);
- __ Branch(&done_loop, eq, a1, Operand(a3));
- __ lw(at, MemOperand(a2, 0 * kPointerSize));
- __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
- __ Subu(a2, a2, Operand(1 * kPointerSize));
- __ Addu(a3, a3, Operand(1 * kPointerSize));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
-    // Set up the rest parameter array in a3.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
- __ sw(at, FieldMemOperand(a3, JSArray::kMapOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ sw(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
- __ sw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ sw(a0, FieldMemOperand(a3, JSArray::kLengthOffset));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a3); // In delay slot
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ Branch(&too_big_for_new_space, gt, t0,
- Operand(kMaxRegularHeapObjectSize));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(t0);
- __ Push(a0, a2, t0);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(a0, a2);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ bind(&too_big_for_new_space);
- __ Push(a1);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
- }
-}
-
-
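
The removed FastNewRestParameterStub derives the rest count by subtracting the function's formal parameter count from the adaptor frame's argument count, taking the rest path only when the difference is positive. A minimal standalone sketch of that arithmetic (plain C++, not V8 API):

#include <algorithm>

// Rest parameters exist only when the caller passed more arguments than
// the function declares; otherwise the stub returns an empty array.
int RestParameterCount(int actual_args, int formal_params) {
  return std::max(actual_args - formal_params, 0);
}
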
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- ra : return address
- // -----------------------------------
- __ AssertFunction(a1);
-
- // Make t0 point to the JavaScript frame.
- __ mov(t0, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ lw(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ lw(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
- __ Branch(&ok, eq, a1, Operand(a3));
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2,
- FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Lsa(a3, t0, a2, kPointerSizeLog2 - 1);
- __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
-  // t0 : JavaScript frame pointer
- // Registers used over whole function:
- // t1 : arguments count (tagged)
- // t2 : mapped parameter count (tagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ lw(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(t0, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&adaptor_frame, eq, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // No adaptor, parameter count = argument count.
- __ mov(t1, a2);
- __ Branch(USE_DELAY_SLOT, &try_allocate);
- __ mov(t2, a2); // In delay slot.
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Lsa(t0, t0, t1, 1);
- __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // t1 = argument count (tagged)
- // t2 = parameter count (tagged)
- // Compute the mapped parameter count = min(t2, t1) in t2.
- __ mov(t2, a2);
- __ Branch(&try_allocate, le, t2, Operand(t1));
- __ mov(t2, t1);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- Label param_map_size;
- DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
- __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
- __ mov(t5, zero_reg); // In delay slot: param map size = 0 when t2 == 0.
- __ sll(t5, t2, 1);
- __ addiu(t5, t5, kParameterMapHeaderSize);
- __ bind(&param_map_size);
-
- // 2. Backing store.
- __ Lsa(t5, t5, t1, 1);
- __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ Addu(t5, t5, Operand(JSSloppyArgumentsObject::kSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(t5, v0, t5, t0, &runtime, NO_ALLOCATION_FLAGS);
-
- // v0 = address of new object(s) (tagged)
- // a2 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into t0.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ lw(t0, NativeContextMemOperand());
- Label skip2_ne, skip2_eq;
- __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
- __ lw(t0, MemOperand(t0, kNormalOffset));
- __ bind(&skip2_ne);
-
- __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
- __ lw(t0, MemOperand(t0, kAliasedOffset));
- __ bind(&skip2_eq);
-
- // v0 = address of new object (tagged)
- // a2 = argument count (smi-tagged)
- // t0 = address of arguments map (tagged)
- // t2 = mapped parameter count (tagged)
- __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
- __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- __ AssertNotSmi(a1);
- __ sw(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(t1);
- __ sw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, t0 will point there, otherwise
- // it will point to the backing store.
- __ Addu(t0, v0, Operand(JSSloppyArgumentsObject::kSize));
- __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // v0 = address of new object (tagged)
- // a2 = argument count (tagged)
- // t0 = address of parameter map or backing store (tagged)
- // t2 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- Label skip3;
- __ Branch(&skip3, ne, t2, Operand(Smi::kZero));
- // Move backing store address to a1, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(a1, t0);
- __ bind(&skip3);
-
- __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::kZero));
-
- __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
- __ Addu(t1, t2, Operand(Smi::FromInt(2)));
- __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
- __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ Lsa(t1, t0, t2, 1);
- __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
- __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
-  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1.
-  // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(t1, t2);
- __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ Subu(t5, t5, Operand(t2));
- __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
- __ Lsa(a1, t0, t1, 1);
- __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
-
- // a1 = address of backing store (tagged)
- // t0 = address of parameter map (tagged)
-  // a0 = temporary scratch (among others, for address calculation)
- // t1 = loop variable (tagged)
- // t3 = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ Subu(t1, t1, Operand(Smi::FromInt(1)));
- __ sll(a0, t1, 1);
- __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ Addu(t6, t0, a0);
- __ sw(t5, MemOperand(t6));
- __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ Addu(t6, a1, a0);
- __ sw(t3, MemOperand(t6));
- __ Addu(t5, t5, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, t1, Operand(Smi::kZero));
-
- // t1 = argument count (tagged).
- __ lw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // v0 = address of new object (tagged)
- // a1 = address of backing store (tagged)
- // t1 = argument count (tagged)
- // t2 = mapped parameter count (tagged)
- // t5 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
- __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
- __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ sll(t6, t2, 1);
- __ Subu(a3, a3, Operand(t6));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ Subu(a3, a3, Operand(kPointerSize));
- __ lw(t0, MemOperand(a3, 0));
- __ Lsa(t5, a1, t2, 1);
- __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
- __ Addu(t2, t2, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ Branch(&arguments_loop, lt, t2, Operand(t1));
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // t1 = argument count (tagged)
- __ bind(&runtime);
- __ Push(a1, a3, t1);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
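
The sloppy-arguments stub above clamps the mapped parameter count to min(formal, actual) and fills the parameter map right to left with context slot indices, as its comment block describes. A hedged sketch of that index computation; kMinContextSlots stands in for Context::MIN_CONTEXT_SLOTS and its value here is illustrative:

#include <algorithm>
#include <vector>

constexpr int kMinContextSlots = 4;  // illustrative stand-in value

// Walk the mapped parameters from right to left; each map slot records
// the context slot that holds that parameter.
std::vector<int> BuildParameterMap(int formal, int actual) {
  int mapped = std::min(formal, actual);
  std::vector<int> map(mapped);
  int context_index = kMinContextSlots + formal - mapped;
  for (int i = mapped - 1; i >= 0; --i) {  // right-to-left, like the loop above
    map[i] = context_index++;
  }
  return map;
}
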
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- ra : return address
- // -----------------------------------
- __ AssertFunction(a1);
-
- // Make a2 point to the JavaScript frame.
- __ mov(a2, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
- __ Branch(&ok, eq, a1, Operand(a3));
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&arguments_adaptor, eq, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- {
- __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a0,
- FieldMemOperand(t0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
- __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
- 1 * kPointerSize));
- }
- __ Branch(&arguments_done);
- __ bind(&arguments_adaptor);
- {
- __ lw(a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Lsa(a2, a3, a0, kPointerSizeLog2 - 1);
- __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
- 1 * kPointerSize));
- }
- __ bind(&arguments_done);
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- a0 : number of rest parameters (tagged)
- // -- a1 : function
-  // -- a2 : pointer to the first rest parameter
- // -- ra : return address
- // -----------------------------------
-
- // Allocate space for the strict arguments object plus the backing store.
- Label allocate, done_allocate;
- __ li(t0, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ Lsa(t0, t0, a0, kPointerSizeLog2 - 1);
- __ Allocate(t0, v0, a3, t1, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
-  // Set up the elements array in v0.
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
- __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
- {
- Label loop, done_loop;
- __ sll(at, a0, kPointerSizeLog2 - 1);
- __ Addu(a1, a3, at);
- __ bind(&loop);
- __ Branch(&done_loop, eq, a1, Operand(a3));
- __ lw(at, MemOperand(a2, 0 * kPointerSize));
- __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
- __ Subu(a2, a2, Operand(1 * kPointerSize));
- __ Addu(a3, a3, Operand(1 * kPointerSize));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
-  // Set up the strict arguments object in a3.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
- __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
- __ sw(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
- __ sw(a0, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a3); // In delay slot
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ Branch(&too_big_for_new_space, gt, t0, Operand(kMaxRegularHeapObjectSize));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(t0);
- __ Push(a0, a2, t0);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(a0, a2);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ bind(&too_big_for_new_space);
- __ Push(a1);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
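
Both removed arguments stubs share the same copy loop: the source pointer starts at the first (topmost) stack argument and walks downward while the destination walks forward through the elements array. Standalone version of that loop:

#include <cstdint>

// Arguments sit on the stack with the first one at the highest address,
// so copying front-to-back into the array means stepping the source down.
void CopyArguments(const intptr_t* first_arg, intptr_t* elements, int count) {
  for (int i = 0; i < count; ++i) {
    elements[i] = *first_arg;  // store into the backing-store slot
    --first_arg;               // next argument lives one slot lower
  }
}
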
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index a57299abf6..beab163ace 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -610,6 +610,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register index,
Register result,
Label* call_runtime) {
+ Label indirect_string_loaded;
+ __ bind(&indirect_string_loaded);
+
// Fetch the instance type of the receiver into result register.
__ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -620,18 +623,23 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&check_sequential, eq, at, Operand(zero_reg));
// Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ And(at, result, Operand(kSlicedNotConsMask));
- __ Branch(&cons_string, eq, at, Operand(zero_reg));
+ Label cons_string, thin_string;
+ __ And(at, result, Operand(kStringRepresentationMask));
+ __ Branch(&cons_string, eq, at, Operand(kConsStringTag));
+ __ Branch(&thin_string, eq, at, Operand(kThinStringTag));
// Handle slices.
- Label indirect_string_loaded;
__ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ sra(at, result, kSmiTagSize);
__ Addu(index, index, at);
__ jmp(&indirect_string_loaded);
+ // Handle thin strings.
+ __ bind(&thin_string);
+ __ lw(string, FieldMemOperand(string, ThinString::kActualOffset));
+ __ jmp(&indirect_string_loaded);
+
// Handle cons strings.
  // Check whether the right-hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
@@ -643,10 +651,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(call_runtime, ne, result, Operand(at));
// Get the first of the two strings and load its instance type.
__ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
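
The net effect of these hunks is a loop: every unwrap (slice to parent, thin to actual, cons to first) branches back to indirect_string_loaded and re-reads the instance type. A simplified sketch with hypothetical stand-in types, omitting the cons empty-second-child check that falls back to the runtime:

enum class Shape { kSequential, kExternal, kSliced, kCons, kThin };
struct Str {
  Shape shape;
  Str* target;   // slice parent / cons first / thin actual
  int offset;    // valid for slices only
};

Str* UnwrapIndirect(Str* s, int* index) {
  while (s->shape == Shape::kSliced || s->shape == Shape::kCons ||
         s->shape == Shape::kThin) {
    if (s->shape == Shape::kSliced) *index += s->offset;
    s = s->target;  // like re-entering indirect_string_loaded
  }
  return s;  // sequential or external: ready for the character load
}
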
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 478b9dfe30..46b8728170 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -93,7 +93,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
+ Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
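
Copying the register as a Float64 payload rather than a C++ double keeps the transfer in integer bit space, which presumably avoids any canonicalization a floating-point round-trip could introduce (NaN payloads, for instance). An illustrative bit-preserving wrapper; Float64Bits is a stand-in, not the V8 class:

#include <cstdint>
#include <cstring>

struct Float64Bits {
  uint64_t bits;  // raw 64-bit payload, never loaded into an FP register
};

Float64Bits FromDouble(double d) {
  Float64Bits f;
  std::memcpy(&f.bits, &d, sizeof(d));  // bit-exact capture
  return f;
}
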
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index c6233c5993..eb47d1cb27 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -68,27 +68,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
@@ -140,15 +119,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a3};
+ Register registers[] = {a1, a0, a3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0, a3, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -177,6 +154,13 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: the target to call
+ // a2: start index (to support rest parameters)
+ Register registers[] = {a1, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -212,13 +196,12 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Descriptor::InitializePlatformSpecific( \
- CallInterfaceDescriptorData* data) { \
- data->InitializePlatformSpecific(0, nullptr, nullptr); \
- }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -408,6 +391,14 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a1, // loaded new FP
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index c4ff0cb987..a28c04a8e2 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -3885,17 +3885,16 @@ void MacroAssembler::Push(Handle<Object> handle) {
push(at);
}
-
-void MacroAssembler::DebugBreak() {
- PrepareCEntryArgs(0);
- PrepareCEntryFunction(
- ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
- CEntryStub ces(isolate(), 1);
- DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+ // Check whether we need to drop frames to restart a function on the stack.
+ ExternalReference restart_fp =
+ ExternalReference::debug_restart_fp_address(isolate());
+ li(a1, Operand(restart_fp));
+ lw(a1, MemOperand(a1));
+ Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+ ne, a1, Operand(zero_reg));
}
-
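
MaybeDropFrames replaces the removed DebugBreak plumbing: it polls a per-isolate restart-FP slot and diverts to the frame-dropper trampoline only when the slot is non-zero. A conceptual sketch with hypothetical names; the real code jumps to a builtin rather than calling a function:

#include <cstdint>

extern uintptr_t g_restart_fp;  // stand-in for debug_restart_fp_address(isolate)
extern void FrameDropperTrampoline();

void MaybeDropFramesSketch() {
  if (g_restart_fp != 0) {      // matches the ne-against-zero_reg Jump above
    FrameDropperTrampoline();
  }
}
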
// ---------------------------------------------------------------------------
// Exception handling.
@@ -4737,32 +4736,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss) {
- // Get the prototype or initial map from the function.
- lw(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- LoadRoot(t8, Heap::kTheHoleValueRootIndex);
- Branch(miss, eq, result, Operand(t8));
-
- // If the function does not have an initial map, we're done.
- Label done;
- GetObjectType(result, scratch, scratch);
- Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
- // Get the prototype from the initial map.
- lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- bind(&done);
-}
-
-
void MacroAssembler::GetObjectType(Register object,
Register map,
Register type_reg) {
@@ -5311,7 +5284,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
void MacroAssembler::StubPrologue(StackFrame::Type type) {
- li(at, Operand(Smi::FromInt(type)));
+ li(at, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(at);
}
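
This hunk and the two frame-setup hunks below swap Smi::FromInt(type) for StackFrame::TypeToMarker(type). On 32-bit targets a Smi is the value shifted left by its one-bit tag, which is what used to be pushed; the new marker encoding is not visible in this diff, so the sketch only shows the old Smi form:

#include <cstdint>

// 32-bit Smi encoding: kSmiTag == 0, kSmiTagSize == 1.
int32_t SmiFromInt(int32_t value) {
  return value << 1;  // low bit clear marks the word as a small integer
}
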
@@ -5342,8 +5315,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- lw(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
- lw(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+ lw(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+ lw(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
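
EmitLoadFeedbackVector now reaches the vector in two hops from the frame's function slot, JSFunction to feedback-vector Cell to Cell value, instead of going through a LiteralsArray. A struct-level sketch with stand-in types, not V8's real object layout:

struct Cell {
  void* value;                 // Cell::kValueOffset
};
struct JSFunction {
  Cell* feedback_vector_cell;  // JSFunction::kFeedbackVectorOffset
};

void* LoadFeedbackVector(const JSFunction* function) {
  return function->feedback_vector_cell->value;  // two dependent loads
}
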
@@ -5369,7 +5342,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
stack_offset -= kPointerSize;
sw(fp, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
- li(t9, Operand(Smi::FromInt(type)));
+ li(t9, Operand(StackFrame::TypeToMarker(type)));
sw(t9, MemOperand(sp, stack_offset));
if (type == StackFrame::INTERNAL) {
DCHECK_EQ(stack_offset, kPointerSize);
@@ -5426,7 +5399,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
- li(at, Operand(Smi::FromInt(frame_type)));
+ li(at, Operand(StackFrame::TypeToMarker(frame_type)));
sw(at, MemOperand(sp, 2 * kPointerSize));
// Set up new frame pointer.
addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
@@ -5526,21 +5499,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
addiu(sp, sp, 8);
}
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- sll(scratch1, length, kSmiTagSize);
- LoadRoot(scratch2, map_index);
- sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
- li(scratch1, Operand(String::kEmptyHashField));
- sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS
// Running on the real platform. Use the alignment as mandated by the local
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 6ba9b3f293..5bffd89814 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -1080,12 +1080,9 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
- // -------------------------------------------------------------------------
- // Debugger Support.
-
- void DebugBreak();
+ // Frame restart support.
+ void MaybeDropFrames();
- // -------------------------------------------------------------------------
// Exception handling.
// Push a new stack handler and link into stack handler chain.
@@ -1109,14 +1106,6 @@ class MacroAssembler: public Assembler {
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss);
-
void GetObjectType(Register function,
Register map,
Register type_reg);
@@ -1742,12 +1731,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
InvokeFlag flag,
const CallWrapper& call_wrapper);
- void InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object, Register scratch,
Condition cond, // ne for new space, eq otherwise.
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 7ff3d144e7..58191a8bdd 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1129,9 +1129,16 @@ double Simulator::get_double_from_register_pair(int reg) {
int64_t Simulator::get_fpu_register(int fpureg) const {
- DCHECK(IsFp64Mode());
- DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return FPUregisters_[fpureg];
+ if (IsFp64Mode()) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return FPUregisters_[fpureg];
+ } else {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+ uint64_t i64;
+ i64 = static_cast<uint32_t>(get_fpu_register_word(fpureg));
+ i64 |= static_cast<uint64_t>(get_fpu_register_word(fpureg + 1)) << 32;
+ return static_cast<int64_t>(i64);
+ }
}
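
In FP32 mode the simulator models a 64-bit FPU value as an even/odd register pair, and the new else-branch assembles it from the two words. The same bit assembly as a standalone function:

#include <cstdint>

// Low word is zero-extended, high word fills bits 63..32.
int64_t CombineFpuPair(int32_t low_word, int32_t high_word) {
  uint64_t i64 = static_cast<uint32_t>(low_word);
  i64 |= static_cast<uint64_t>(static_cast<uint32_t>(high_word)) << 32;
  return static_cast<int64_t>(i64);
}
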
@@ -1688,18 +1695,77 @@ int32_t Simulator::get_pc() const {
 // executed in the simulator. Since the host is typically IA32, we will not
// get the correct MIPS-like behaviour on unaligned accesses.
-void Simulator::TraceRegWr(int32_t value) {
+void Simulator::TraceRegWr(int32_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
- SNPrintF(trace_buf_, "%08x", value);
+ union {
+ int32_t fmt_int32;
+ float fmt_float;
+ } v;
+ v.fmt_int32 = value;
+
+ switch (t) {
+ case WORD:
+ SNPrintF(trace_buf_, "%08" PRIx32 " (%" PRIu64 ") int32:%" PRId32
+ " uint32:%" PRIu32,
+ value, icount_, value, value);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_, "%08" PRIx32 " (%" PRIu64 ") flt:%e",
+ v.fmt_int32, icount_, v.fmt_float);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
}
+void Simulator::TraceRegWr(int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case DWORD:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRIu64 ") int64:%" PRId64
+ " uint64:%" PRIu64,
+ value, icount_, value, value);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRIu64 ") dbl:%e",
+ v.fmt_int64, icount_, v.fmt_double);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
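
These helpers re-view raw register bits as float or double through a union so one value can be logged in both integer and floating-point form. A minimal standalone demo of the same punning pattern:

#include <cstdint>
#include <cstdio>

void PrintBothViews(int64_t value) {
  union {
    int64_t fmt_int64;
    double fmt_double;
  } v;
  v.fmt_int64 = value;
  std::printf("%016llx dbl:%e\n",
              static_cast<unsigned long long>(value), v.fmt_double);
}
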
// TODO(plind): consider making icount_ printing a flag option.
-void Simulator::TraceMemRd(int32_t addr, int32_t value) {
+void Simulator::TraceMemRd(int32_t addr, int32_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
- SNPrintF(trace_buf_, "%08x <-- [%08x] (%" PRIu64 ")", value, addr,
- icount_);
+ union {
+ int32_t fmt_int32;
+ float fmt_float;
+ } v;
+ v.fmt_int32 = value;
+
+ switch (t) {
+ case WORD:
+ SNPrintF(trace_buf_, "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64
+ ") int32:%" PRId32 " uint32:%" PRIu32,
+ value, addr, icount_, value, value);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_,
+ "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64 ") flt:%e",
+ v.fmt_int32, addr, icount_, v.fmt_float);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
}
@@ -1708,22 +1774,73 @@ void Simulator::TraceMemWr(int32_t addr, int32_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
switch (t) {
case BYTE:
- SNPrintF(trace_buf_, " %02x --> [%08x]",
- static_cast<int8_t>(value), addr);
+ SNPrintF(trace_buf_,
+ " %02" PRIx8 " --> [%08" PRIx32 "] (%" PRIu64 ")",
+ static_cast<uint8_t>(value), addr, icount_);
break;
case HALF:
- SNPrintF(trace_buf_, " %04x --> [%08x]", static_cast<int16_t>(value),
- addr);
+ SNPrintF(trace_buf_,
+ " %04" PRIx16 " --> [%08" PRIx32 "] (%" PRIu64 ")",
+ static_cast<uint16_t>(value), addr, icount_);
break;
case WORD:
- SNPrintF(trace_buf_, "%08x --> [%08x]", value, addr);
+ SNPrintF(trace_buf_,
+ "%08" PRIx32 " --> [%08" PRIx32 "] (%" PRIu64 ")", value,
+ addr, icount_);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void Simulator::TraceMemRd(int32_t addr, int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case DWORD:
+ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%08" PRIx32 "] (%" PRIu64
+ ") int64:%" PRId64 " uint64:%" PRIu64,
+ v.fmt_int64, addr, icount_, v.fmt_int64, v.fmt_int64);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%08" PRIx32 "] (%" PRIu64
+ ") dbl:%e",
+ v.fmt_int64, addr, icount_, v.fmt_double);
+ break;
+ case FLOAT_DOUBLE:
+ SNPrintF(trace_buf_, "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64
+ ") flt:%e dbl:%e",
+ v.fmt_int32[1], addr, icount_, v.fmt_float[1], v.fmt_double);
break;
+ default:
+ UNREACHABLE();
}
}
}
+void Simulator::TraceMemWr(int32_t addr, int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (t) {
+ case DWORD:
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " --> [%08" PRIx32 "] (%" PRIu64 ")", value,
+ addr, icount_);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
-int Simulator::ReadW(int32_t addr, Instruction* instr) {
+int Simulator::ReadW(int32_t addr, Instruction* instr, TraceType t) {
   if (addr >= 0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
PrintF("Memory read from bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
@@ -1733,7 +1850,16 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) {
}
if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- TraceMemRd(addr, static_cast<int32_t>(*ptr));
+ switch (t) {
+ case WORD:
+ TraceMemRd(addr, static_cast<int32_t>(*ptr), t);
+ break;
+ case FLOAT:
+ // This TraceType is allowed but tracing for this value will be omitted.
+ break;
+ default:
+ UNREACHABLE();
+ }
return *ptr;
}
PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
@@ -1744,7 +1870,6 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) {
return 0;
}
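
ReadW keeps its existing guards around the new TraceType plumbing: reads inside the first 0x400 bytes count as null dereferences, and only pointer-aligned accesses (or any access on MIPS32r6) proceed. The two predicates, isolated; kPointerAlignMask is a stand-in for the real kPointerAlignmentMask:

#include <cstdint>

constexpr int32_t kNullPageLimit = 0x400;
constexpr int32_t kPointerAlignMask = 0x3;  // stand-in for kPointerAlignmentMask

bool IsNullPageAccess(int32_t addr) { return addr >= 0 && addr < kNullPageLimit; }
bool IsAlignedAccess(int32_t addr) { return (addr & kPointerAlignMask) == 0; }
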
-
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
@@ -1766,7 +1891,6 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
dbg.Debug();
}
-
double Simulator::ReadD(int32_t addr, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
double* ptr = reinterpret_cast<double*>(addr);
@@ -2460,7 +2584,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
result = lower;
break;
}
- set_fpu_register_double(fd_reg(), result);
+ SetFPUDoubleResult(fd_reg(), result);
if (result != fs) {
set_fcsr_bit(kFCSRInexactFlagBit, true);
}
@@ -2468,20 +2592,20 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
case SEL:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
+ SetFPUDoubleResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
break;
case SELEQZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
+ SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
break;
case SELNEZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
+ SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
break;
case MOVZ_C: {
DCHECK(IsMipsArchVariant(kMips32r2));
if (rt() == 0) {
- set_fpu_register_double(fd_reg(), fs);
+ SetFPUDoubleResult(fd_reg(), fs);
}
break;
}
@@ -2490,7 +2614,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t rt_reg = instr_.RtValue();
int32_t rt = get_register(rt_reg);
if (rt != 0) {
- set_fpu_register_double(fd_reg(), fs);
+ SetFPUDoubleResult(fd_reg(), fs);
}
break;
}
@@ -2500,115 +2624,121 @@ void Simulator::DecodeTypeRegisterDRsType() {
ft_cc = get_fcsr_condition_bit(ft_cc);
if (instr_.Bit(16)) { // Read Tf bit.
// MOVT.D
- if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
+ if (test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs);
} else {
// MOVF.D
- if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
+ if (!test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs);
}
break;
}
case MIN:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), FPUMin(ft, fs));
+ SetFPUDoubleResult(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
+ SetFPUDoubleResult(fd_reg(), FPUMax(ft, fs));
break;
case MINA:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), FPUMinA(ft, fs));
+ SetFPUDoubleResult(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
+ SetFPUDoubleResult(fd_reg(), FPUMaxA(ft, fs));
break;
case ADD_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation(
[](double lhs, double rhs) { return lhs + rhs; }, fs, ft));
break;
case SUB_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation(
[](double lhs, double rhs) { return lhs - rhs; }, fs, ft));
break;
case MADDF_D:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), std::fma(fs, ft, fd));
+ SetFPUDoubleResult(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_D:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), std::fma(-fs, ft, fd));
+ SetFPUDoubleResult(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation(
[](double lhs, double rhs) { return lhs * rhs; }, fs, ft));
break;
case DIV_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation(
[](double lhs, double rhs) { return lhs / rhs; }, fs, ft));
break;
case ABS_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation([](double fs) { return FPAbs(fs); }, fs));
break;
case MOV_D:
- set_fpu_register_double(fd_reg(), fs);
+ SetFPUDoubleResult(fd_reg(), fs);
break;
case NEG_D:
- set_fpu_register_double(
- fd_reg(), FPUCanonalizeOperation([](double src) { return -src; },
- KeepSign::yes, fs));
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation([](double src) { return -src; },
+ KeepSign::yes, fs));
break;
case SQRT_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation([](double fs) { return std::sqrt(fs); }, fs));
break;
case RSQRT_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(), FPUCanonalizeOperation(
[](double fs) { return 1.0 / std::sqrt(fs); }, fs));
break;
case RECIP_D:
- set_fpu_register_double(
- fd_reg(),
- FPUCanonalizeOperation([](double fs) { return 1.0 / fs; }, fs));
+ SetFPUDoubleResult(fd_reg(), FPUCanonalizeOperation(
+ [](double fs) { return 1.0 / fs; }, fs));
break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_EQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_UEQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_OLT_D:
set_fcsr_bit(fcsr_cc, (fs < ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_ULT_D:
set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_OLE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_ULE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case CVT_W_D: { // Convert double to word.
double rounded;
int32_t result;
round_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -2622,7 +2752,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
// round to the even one.
result--;
}
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
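
ROUND_W_D rounds by adding 0.5 and flooring, then pulls an exact-halfway odd result back by one, which is the round-to-nearest-even fixup the comment above describes. The same logic standalone:

#include <cmath>
#include <cstdint>

int32_t RoundToNearestEven(double fs) {
  double rounded = std::floor(fs + 0.5);
  int32_t result = static_cast<int32_t>(rounded);
  if ((result & 1) != 0 && result - fs == 0.5) {
    result--;  // exact halfway case: prefer the even neighbour
  }
  return result;
}
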
@@ -2631,7 +2761,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
{
double rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -2640,7 +2770,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
{
double rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -2649,20 +2779,20 @@ void Simulator::DecodeTypeRegisterDRsType() {
{
double rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case CVT_S_D: // Convert double to float (single).
- set_fpu_register_float(fd_reg(), static_cast<float>(fs));
+ SetFPUFloatResult(fd_reg(), static_cast<float>(fs));
break;
case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
if (IsFp64Mode()) {
int64_t result;
double rounded;
round64_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register(fd_reg(), result);
+ SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -2677,7 +2807,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
double rounded = trunc(fs);
i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg(), i64);
+ SetFPUResult(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -2697,7 +2827,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
int64_t i64 = static_cast<int64_t>(result);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg(), i64);
+ SetFPUResult(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -2711,7 +2841,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
double rounded = std::floor(fs);
int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg(), i64);
+ SetFPUResult(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -2725,7 +2855,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
double rounded = std::ceil(fs);
int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg(), i64);
+ SetFPUResult(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -2795,12 +2925,13 @@ void Simulator::DecodeTypeRegisterDRsType() {
DCHECK(result != 0);
dResult = bit_cast<double>(result);
- set_fpu_register_double(fd_reg(), dResult);
+ SetFPUDoubleResult(fd_reg(), dResult);
break;
}
case C_F_D: {
set_fcsr_bit(fcsr_cc, false);
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
}
default:
@@ -2816,83 +2947,83 @@ void Simulator::DecodeTypeRegisterWRsType() {
switch (instr_.FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
alu_out = get_fpu_register_signed_word(fs_reg());
- set_fpu_register_float(fd_reg(), static_cast<float>(alu_out));
+ SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
break;
case CVT_D_W: // Convert word to double.
alu_out = get_fpu_register_signed_word(fs_reg());
- set_fpu_register_double(fd_reg(), static_cast<double>(alu_out));
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(alu_out));
break;
case CMP_AF:
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
}
break;
case CMP_EQ:
if (fs == ft) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
}
break;
case CMP_UEQ:
if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
}
break;
case CMP_LT:
if (fs < ft) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
}
break;
case CMP_ULT:
if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
}
break;
case CMP_LE:
if (fs <= ft) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
}
break;
case CMP_ULE:
if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
}
break;
case CMP_OR:
if (!std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
}
break;
case CMP_UNE:
if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
}
break;
case CMP_NE:
if (fs != ft) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult(fd_reg(), 0);
}
break;
default:
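
Every CMP_* case above follows one shape: write an all-ones word when the predicate holds and zero otherwise, with the U-variants also firing on unordered (NaN) inputs. One representative, extracted:

#include <cmath>
#include <cstdint>

// CMP.ULT: true if fs < ft or either operand is NaN.
int32_t CmpUltMask(double fs, double ft) {
  bool unordered = std::isnan(fs) || std::isnan(ft);
  return (fs < ft) || unordered ? -1 : 0;  // -1 == all bits set
}
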
@@ -2944,102 +3075,108 @@ void Simulator::DecodeTypeRegisterSRsType() {
result = lower;
break;
}
- set_fpu_register_float(fd_reg(), result);
+ SetFPUFloatResult(fd_reg(), result);
if (result != fs) {
set_fcsr_bit(kFCSRInexactFlagBit, true);
}
break;
}
case ADD_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(),
FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
fs, ft));
break;
case SUB_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(),
FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
fs, ft));
break;
case MADDF_S:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), std::fma(fs, ft, fd));
+ SetFPUFloatResult(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_S:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), std::fma(-fs, ft, fd));
+ SetFPUFloatResult(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(),
FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
fs, ft));
break;
case DIV_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(),
FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
fs, ft));
break;
case ABS_S:
- set_fpu_register_float(
- fd_reg(),
- FPUCanonalizeOperation([](float fs) { return FPAbs(fs); }, fs));
+ SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
+ [](float fs) { return FPAbs(fs); }, fs));
break;
case MOV_S:
- set_fpu_register_float(fd_reg(), fs);
+ SetFPUFloatResult(fd_reg(), fs);
break;
case NEG_S:
- set_fpu_register_float(
- fd_reg(), FPUCanonalizeOperation([](float src) { return -src; },
- KeepSign::yes, fs));
+ SetFPUFloatResult(fd_reg(),
+ FPUCanonalizeOperation([](float src) { return -src; },
+ KeepSign::yes, fs));
break;
case SQRT_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(),
FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs));
break;
case RSQRT_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(), FPUCanonalizeOperation(
[](float src) { return 1.0 / std::sqrt(src); }, fs));
break;
case RECIP_S:
- set_fpu_register_float(
- fd_reg(),
- FPUCanonalizeOperation([](float src) { return 1.0 / src; }, fs));
+ SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
+ [](float src) { return 1.0 / src; }, fs));
break;
case C_F_D:
set_fcsr_bit(fcsr_cc, false);
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_EQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_UEQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_OLT_D:
set_fcsr_bit(fcsr_cc, (fs < ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_ULT_D:
set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_OLE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_ULE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case CVT_D_S:
- set_fpu_register_double(fd_reg(), static_cast<double>(fs));
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(fs));
break;
case SEL:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
+ SetFPUFloatResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
break;
case CLASS_S: { // Mips32r6 instruction
// Convert float input to uint32_t for easier bit manipulation
@@ -3103,33 +3240,33 @@ void Simulator::DecodeTypeRegisterSRsType() {
DCHECK(result != 0);
fResult = bit_cast<float>(result);
- set_fpu_register_float(fd_reg(), fResult);
+ SetFPUFloatResult(fd_reg(), fResult);
break;
}
case SELEQZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), (ft_int & 0x1) == 0
- ? get_fpu_register_float(fs_reg())
- : 0.0);
+ SetFPUFloatResult(
+ fd_reg(),
+ (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg()) : 0.0);
break;
case SELNEZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), (ft_int & 0x1) != 0
- ? get_fpu_register_float(fs_reg())
- : 0.0);
+ SetFPUFloatResult(
+ fd_reg(),
+ (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg()) : 0.0);
break;
case MOVZ_C: {
DCHECK(IsMipsArchVariant(kMips32r2));
if (rt() == 0) {
- set_fpu_register_float(fd_reg(), fs);
+ SetFPUFloatResult(fd_reg(), fs);
}
break;
}
case MOVN_C: {
DCHECK(IsMipsArchVariant(kMips32r2));
if (rt() != 0) {
- set_fpu_register_float(fd_reg(), fs);
+ SetFPUFloatResult(fd_reg(), fs);
}
break;
}
@@ -3140,17 +3277,17 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (instr_.Bit(16)) { // Read Tf bit.
// MOVT.D
- if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
+ if (test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs);
} else {
// MOVF.D
- if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
+ if (!test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs);
}
break;
}
case TRUNC_W_S: { // Truncate single to word (round towards 0).
float rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -3160,7 +3297,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
float rounded = trunc(fs);
int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg(), i64);
+ SetFPUResult(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -3173,7 +3310,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
{
float rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -3183,7 +3320,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
float rounded = std::floor(fs);
int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg(), i64);
+ SetFPUResult(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -3200,7 +3337,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
// round to the even one.
result--;
}
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -3217,7 +3354,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
int64_t i64 = static_cast<int64_t>(result);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg(), i64);
+ SetFPUResult(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -3230,7 +3367,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
{
float rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -3240,7 +3377,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
float rounded = std::ceil(fs);
int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg(), i64);
+ SetFPUResult(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -3251,26 +3388,26 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
case MIN:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), FPUMin(ft, fs));
+ SetFPUFloatResult(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), FPUMax(ft, fs));
+ SetFPUFloatResult(fd_reg(), FPUMax(ft, fs));
break;
case MINA:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), FPUMinA(ft, fs));
+ SetFPUFloatResult(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), FPUMaxA(ft, fs));
+ SetFPUFloatResult(fd_reg(), FPUMaxA(ft, fs));
break;
case CVT_L_S: {
if (IsFp64Mode()) {
int64_t result;
float rounded;
round64_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register(fd_reg(), result);
+ SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -3283,7 +3420,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
float rounded;
int32_t result;
round_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -3311,7 +3448,7 @@ void Simulator::DecodeTypeRegisterLRsType() {
i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg()));
i64 |= static_cast<int64_t>(get_fpu_register_word(fs_reg() + 1)) << 32;
}
- set_fpu_register_double(fd_reg(), static_cast<double>(i64));
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(i64));
break;
case CVT_S_L:
if (IsFp64Mode()) {
@@ -3320,79 +3457,79 @@ void Simulator::DecodeTypeRegisterLRsType() {
i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg()));
i64 |= static_cast<int64_t>(get_fpu_register_word(fs_reg() + 1)) << 32;
}
- set_fpu_register_float(fd_reg(), static_cast<float>(i64));
+ SetFPUFloatResult(fd_reg(), static_cast<float>(i64));
break;
case CMP_AF: // Mips64r6 CMP.D instructions.
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_EQ:
if (fs == ft) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_UEQ:
if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_LT:
if (fs < ft) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_ULT:
if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_LE:
if (fs <= ft) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_ULE:
if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_OR:
if (!std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_UNE:
if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_NE:
if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
default:
@@ -3406,16 +3543,16 @@ void Simulator::DecodeTypeRegisterCOP1() {
case CFC1:
// At the moment only FCSR is supported.
DCHECK(fs_reg() == kFCSRRegister);
- set_register(rt_reg(), FCSR_);
+ SetResult(rt_reg(), FCSR_);
break;
case MFC1:
- set_register(rt_reg(), get_fpu_register_word(fs_reg()));
+ SetResult(rt_reg(), get_fpu_register_word(fs_reg()));
break;
case MFHC1:
if (IsFp64Mode()) {
- set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+ SetResult(rt_reg(), get_fpu_register_hi_word(fs_reg()));
} else {
- set_register(rt_reg(), get_fpu_register_word(fs_reg() + 1));
+ SetResult(rt_reg(), get_fpu_register_word(fs_reg() + 1));
}
break;
case CTC1: {
@@ -3428,18 +3565,26 @@ void Simulator::DecodeTypeRegisterCOP1() {
DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2));
FCSR_ = reg & ~kFCSRNaN2008FlagMask;
}
+ TraceRegWr(static_cast<int32_t>(FCSR_));
break;
}
case MTC1:
// Hardware writes upper 32-bits to zero on mtc1.
set_fpu_register_hi_word(fs_reg(), 0);
set_fpu_register_word(fs_reg(), registers_[rt_reg()]);
+ TraceRegWr(get_fpu_register_word(fs_reg()), FLOAT);
break;
case MTHC1:
if (IsFp64Mode()) {
set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
+ TraceRegWr(get_fpu_register(fs_reg()), DOUBLE);
} else {
set_fpu_register_word(fs_reg() + 1, registers_[rt_reg()]);
+ if (fs_reg() % 2) {
+ TraceRegWr(get_fpu_register_word(fs_reg() + 1), FLOAT);
+ } else {
+ TraceRegWr(get_fpu_register(fs_reg()), DOUBLE);
+ }
}
break;
case S: {
@@ -3472,7 +3617,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
fr = get_fpu_register_float(fr_reg());
fs = get_fpu_register_float(fs_reg());
ft = get_fpu_register_float(ft_reg());
- set_fpu_register_float(fd_reg(), fs * ft + fr);
+ SetFPUFloatResult(fd_reg(), fs * ft + fr);
break;
}
case MSUB_S: {
@@ -3481,7 +3626,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
fr = get_fpu_register_float(fr_reg());
fs = get_fpu_register_float(fs_reg());
ft = get_fpu_register_float(ft_reg());
- set_fpu_register_float(fd_reg(), fs * ft - fr);
+ SetFPUFloatResult(fd_reg(), fs * ft - fr);
break;
}
case MADD_D: {
@@ -3490,7 +3635,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
fr = get_fpu_register_double(fr_reg());
fs = get_fpu_register_double(fs_reg());
ft = get_fpu_register_double(ft_reg());
- set_fpu_register_double(fd_reg(), fs * ft + fr);
+ SetFPUDoubleResult(fd_reg(), fs * ft + fr);
break;
}
case MSUB_D: {
@@ -3499,7 +3644,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
fr = get_fpu_register_double(fr_reg());
fs = get_fpu_register_double(fs_reg());
ft = get_fpu_register_double(ft_reg());
- set_fpu_register_double(fd_reg(), fs * ft - fr);
+ SetFPUDoubleResult(fd_reg(), fs * ft - fr);
break;
}
default:
@@ -3517,11 +3662,11 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
switch (instr_.FunctionFieldRaw()) {
case SELEQZ_S:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_register(rd_reg(), rt() == 0 ? rs() : 0);
+ SetResult(rd_reg(), rt() == 0 ? rs() : 0);
break;
case SELNEZ_S:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_register(rd_reg(), rt() != 0 ? rs() : 0);
+ SetResult(rd_reg(), rt() != 0 ? rs() : 0);
break;
case JR: {
int32_t next_pc = rs();
@@ -3622,10 +3767,10 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
} else {
switch (sa()) {
case MUL_OP:
- set_register(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
break;
case MUH_OP:
- set_register(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
+ SetResult(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
break;
default:
UNIMPLEMENTED_MIPS();
@@ -3641,10 +3786,10 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
} else {
switch (sa()) {
case MUL_OP:
- set_register(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
break;
case MUH_OP:
- set_register(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
+ SetResult(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
break;
default:
UNIMPLEMENTED_MIPS();
@@ -3657,16 +3802,16 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
switch (sa()) {
case DIV_OP:
if (rs() == INT_MIN && rt() == -1) {
- set_register(rd_reg(), INT_MIN);
+ SetResult(rd_reg(), INT_MIN);
} else if (rt() != 0) {
- set_register(rd_reg(), rs() / rt());
+ SetResult(rd_reg(), rs() / rt());
}
break;
case MOD_OP:
if (rs() == INT_MIN && rt() == -1) {
- set_register(rd_reg(), 0);
+ SetResult(rd_reg(), 0);
} else if (rt() != 0) {
- set_register(rd_reg(), rs() % rt());
+ SetResult(rd_reg(), rs() % rt());
}
break;
default:
@@ -3692,12 +3837,12 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
switch (sa()) {
case DIV_OP:
if (rt_u() != 0) {
- set_register(rd_reg(), rs_u() / rt_u());
+ SetResult(rd_reg(), rs_u() / rt_u());
}
break;
case MOD_OP:
if (rt_u() != 0) {
- set_register(rd_reg(), rs_u() % rt_u());
+ SetResult(rd_reg(), rs_u() % rt_u());
}
break;
default:
@@ -3791,8 +3936,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
// Conditional moves.
case MOVN:
if (rt()) {
- set_register(rd_reg(), rs());
- TraceRegWr(rs());
+ SetResult(rd_reg(), rs());
}
break;
case MOVCI: {
@@ -3807,8 +3951,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
}
case MOVZ:
if (!rt()) {
- set_register(rd_reg(), rs());
- TraceRegWr(rs());
+ SetResult(rd_reg(), rs());
}
break;
default:
@@ -4372,16 +4515,25 @@ void Simulator::DecodeTypeImmediate() {
}
case LWC1:
set_fpu_register_hi_word(ft_reg, 0);
- set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr_.instr()));
+ set_fpu_register_word(ft_reg,
+ ReadW(rs + se_imm16, instr_.instr(), FLOAT));
+ if (ft_reg % 2) {
+ TraceMemRd(rs + se_imm16, get_fpu_register(ft_reg - 1), FLOAT_DOUBLE);
+ } else {
+ TraceMemRd(rs + se_imm16, get_fpu_register_word(ft_reg), FLOAT);
+ }
break;
case LDC1:
set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr_.instr()));
+ TraceMemRd(rs + se_imm16, get_fpu_register(ft_reg), DOUBLE);
break;
case SWC1:
WriteW(rs + se_imm16, get_fpu_register_word(ft_reg), instr_.instr());
+ TraceMemWr(rs + se_imm16, get_fpu_register_word(ft_reg));
break;
case SDC1:
WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr());
+ TraceMemWr(rs + se_imm16, get_fpu_register(ft_reg));
break;
// ------------- PC-Relative instructions.
case PCREL: {
@@ -4422,7 +4574,7 @@ void Simulator::DecodeTypeImmediate() {
}
}
}
- set_register(rs_reg, alu_out);
+ SetResult(rs_reg, alu_out);
break;
}
default:
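A note on the LWC1 tracing added above: in FR=0 (Fp32) mode a 64-bit FPU value occupies an even/odd register pair, so a word landing in an odd-numbered register is traced as the upper half of the double based at ft_reg - 1 (the FLOAT_DOUBLE case), while an even register is traced as a standalone FLOAT. A hedged sketch of that selection rule (the pairing is the MIPS Fp32 convention; the helper name is invented for illustration):

#include <cstdio>

// FR=0 convention: doubles live in even/odd register pairs, base even.
// Writing the odd half completes the double rooted at the even register,
// which is why the simulator traces get_fpu_register(ft_reg - 1) then.
enum TraceType { BYTE, HALF, WORD, DWORD, FLOAT, DOUBLE, FLOAT_DOUBLE };

TraceType Lwc1TraceType(int ft_reg) {
  return (ft_reg % 2) ? FLOAT_DOUBLE  // odd: upper word of a register pair
                      : FLOAT;        // even: a standalone single
}

int main() {
  std::printf("f4 -> %s\n", Lwc1TraceType(4) == FLOAT ? "FLOAT" : "FLOAT_DOUBLE");
  std::printf("f5 -> %s\n", Lwc1TraceType(5) == FLOAT ? "FLOAT" : "FLOAT_DOUBLE");
}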
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 3795eecc78..2785f913c9 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -293,6 +293,9 @@ class Simulator {
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
+ // Helpers for data value tracing.
+ enum TraceType { BYTE, HALF, WORD, DWORD, FLOAT, DOUBLE, FLOAT_DOUBLE };
+
// Read and write memory.
inline uint32_t ReadBU(int32_t addr);
inline int32_t ReadB(int32_t addr);
@@ -305,24 +308,18 @@ class Simulator {
inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
- inline int ReadW(int32_t addr, Instruction* instr);
+ inline int ReadW(int32_t addr, Instruction* instr, TraceType t = WORD);
inline void WriteW(int32_t addr, int value, Instruction* instr);
inline double ReadD(int32_t addr, Instruction* instr);
inline void WriteD(int32_t addr, double value, Instruction* instr);
- // Helpers for data value tracing.
- enum TraceType {
- BYTE,
- HALF,
- WORD
- // DWORD,
- // DFLOAT - Floats may have printing issues due to paired lwc1's
- };
-
- void TraceRegWr(int32_t value);
- void TraceMemWr(int32_t addr, int32_t value, TraceType t);
- void TraceMemRd(int32_t addr, int32_t value);
+ void TraceRegWr(int32_t value, TraceType t = WORD);
+ void TraceRegWr(int64_t value, TraceType t = DWORD);
+ void TraceMemWr(int32_t addr, int32_t value, TraceType t = WORD);
+ void TraceMemRd(int32_t addr, int32_t value, TraceType t = WORD);
+ void TraceMemWr(int32_t addr, int64_t value, TraceType t = DWORD);
+ void TraceMemRd(int32_t addr, int64_t value, TraceType t = DWORD);
EmbeddedVector<char, 128> trace_buf_;
// Operations depending on endianness.
@@ -381,6 +378,26 @@ class Simulator {
TraceRegWr(alu_out);
}
+ inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
+ set_fpu_register_word(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register_word(fd_reg));
+ }
+
+ inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
+ set_fpu_register(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg));
+ }
+
+ inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
+ set_fpu_register_float(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register_word(fd_reg), FLOAT);
+ }
+
+ inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
+ set_fpu_register_double(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+ }
+
void DecodeTypeImmediate();
void DecodeTypeJump();
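The new SetFPU*Result helpers above are the heart of this patch: each one pairs the register write with the matching TraceRegWr call, reading the value back out of the register file so the --trace-sim log shows exactly what landed there. A self-contained sketch of the write-then-trace-readback shape (the Simulator plumbing is reduced to a toy; only the pattern is the point):

#include <cstdint>
#include <cstdio>
#include <cstring>

struct ToySimulator {
  int64_t fpu_[32] = {};

  void set_fpu_register_float(int n, float v) {
    uint32_t bits;
    std::memcpy(&bits, &v, sizeof(bits));          // bit-exact store
    fpu_[n] = (fpu_[n] & ~0xffffffffLL) | bits;    // low word only
  }
  uint32_t get_fpu_register_word(int n) const {
    return static_cast<uint32_t>(fpu_[n] & 0xffffffff);
  }

  // Write, then trace what was actually written, in one call site.
  void SetFPUFloatResult(int fd, float v) {
    set_fpu_register_float(fd, v);
    std::printf("f%-2d <- %08x (FLOAT)\n", fd, get_fpu_register_word(fd));
  }
};

int main() {
  ToySimulator sim;
  sim.SetFPUFloatResult(2, 1.5f);  // one call: write and trace stay in sync
}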
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 6078ab965a..3891391da1 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -41,7 +41,7 @@
#include "src/assembler.h"
#include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -139,6 +139,17 @@ int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
+Address Assembler::target_address_at(Address pc, Code* code) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
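Moving these two bodies out of the class definition and into the -inl.h is an include-layering fix rather than a behavior change: they call code->constant_pool(), which is defined in objects-inl.h, and the plain assembler header should not drag that in. A minimal model of the split (all names here are placeholders, not V8 types):

// widget.h -- declaration only; no dependency on Gadget's inline methods.
struct Gadget;
struct Widget {
  static int ValueAt(const Gadget* g);  // body deliberately elsewhere
};

// widget-inl.h -- definitions that are allowed to see gadget-inl.h.
struct Gadget { int value() const { return 42; } };
inline int Widget::ValueAt(const Gadget* g) { return g ? g->value() : 0; }

int main() { Gadget g; return Widget::ValueAt(&g) == 42 ? 0 : 1; }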
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index e3786a7e8c..433c03c1c3 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -155,6 +155,7 @@ int ToNumber(Register reg);
Register ToRegister(int num);
static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
// Coprocessor register.
struct FPURegister {
@@ -477,17 +478,10 @@ class Assembler : public AssemblerBase {
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
set_target_address_at(isolate, pc, target, icache_flush_mode);
}
- INLINE(static Address target_address_at(Address pc, Code* code)) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- return target_address_at(pc, constant_pool);
- }
+ INLINE(static Address target_address_at(Address pc, Code* code));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(isolate, pc, constant_pool, target,
- icache_flush_mode);
- }
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 79be33ff90..645599a03a 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -233,8 +233,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
} else {
__ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
@@ -242,8 +240,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -1142,9 +1138,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// We build an EntryFrame.
__ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = type();
- __ li(a6, Operand(Smi::FromInt(marker)));
- __ li(a5, Operand(Smi::FromInt(marker)));
+ StackFrame::Type marker = type();
+ __ li(a6, Operand(StackFrame::TypeToMarker(marker)));
+ __ li(a5, Operand(StackFrame::TypeToMarker(marker)));
ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
__ li(a4, Operand(c_entry_fp));
__ ld(a4, MemOperand(a4));
@@ -1175,12 +1171,12 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ld(a6, MemOperand(a5));
__ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
__ sd(fp, MemOperand(a5));
- __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ li(a4, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ nop(); // Branch delay slot nop.
__ bind(&non_outermost_js);
- __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ li(a4, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
__ push(a4);
@@ -1246,10 +1242,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(a5);
- __ Branch(&non_outermost_js_2,
- ne,
- a5,
- Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ Branch(&non_outermost_js_2, ne, a5,
+ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ li(a5, Operand(ExternalReference(js_entry_sp)));
__ sd(zero_reg, MemOperand(a5));
__ bind(&non_outermost_js_2);
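All the Smi::FromInt(...) to StackFrame::TypeToMarker(...) rewrites in this file follow from the new frame-marker encoding: a marker is a small tagged integer that fits a 32-bit immediate, so it can be materialized with a single li and compared directly, whereas a MIPS64 Smi lives in the upper 32 bits. A sketch assuming the (type << 1) | 1 encoding used by src/frames.h at this revision (the JS-entry markers pushed above are separate small enum constants, stored directly):

#include <cassert>
#include <cstdint>

// Assumed encoding: shift the frame type left one and set the low bit, so
// the marker can never alias a properly aligned frame pointer, yet still
// fits in a single 32-bit load-immediate.
int32_t TypeToMarker(int32_t type) {
  assert(type >= 0);
  return (type << 1) | 1;
}

int main() {
  assert(TypeToMarker(0) == 1);
  assert((TypeToMarker(7) & 1) == 1);  // always odd, never pointer-like
  assert(TypeToMarker(7) >> 1 == 7);   // the type is recoverable
}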
@@ -1272,51 +1266,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
- // Return address is in ra.
- Label miss;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register index = LoadDescriptor::NameRegister();
- Register scratch = a5;
- Register result = v0;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));
-
- StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- RECEIVER_IS_STRING);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver = LoadDescriptor::ReceiverRegister();
- // Ensure that the vector and slot registers won't be clobbered before
- // calling the miss handler.
- DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::VectorRegister(),
- LoadWithVectorDescriptor::SlotRegister()));
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a4,
- a5, &miss);
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -1419,7 +1368,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
- // (8) Sliced string. Replace subject with parent. Go to (1).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label check_underlying; // (1)
Label seq_string; // (4)
@@ -1443,6 +1392,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
// Go to (5).
@@ -1469,12 +1419,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Branch(&runtime, ls, a3, Operand(a1));
__ SmiUntag(a1);
- STATIC_ASSERT(kStringEncodingMask == 4);
- STATIC_ASSERT(kOneByteStringTag == 4);
+ STATIC_ASSERT(kStringEncodingMask == 8);
+ STATIC_ASSERT(kOneByteStringTag == 8);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one_byte.
__ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
- __ dsra(a3, a0, 2); // a3 is 1 for one_byte, 0 for UC16 (used below).
+ __ dsra(a3, a0, 3); // a3 is 1 for one_byte, 0 for UC16 (used below).
__ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ Movz(t9, a5, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
@@ -1720,12 +1670,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
__ Branch(&runtime, ne, at, Operand(zero_reg));
- // (8) Sliced string. Replace subject with parent. Go to (4).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (1).
+ Label thin_string;
+ __ Branch(&thin_string, eq, a1, Operand(kThinStringTag));
// Load offset into t0 and replace subject string with parent.
__ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ SmiUntag(t0);
__ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ jmp(&check_underlying); // Go to (1).
+
+ __ bind(&thin_string);
+ __ ld(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+ __ jmp(&check_underlying); // Go to (1).
#endif // V8_INTERPRETED_REGEXP
}
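Case (8) now also handles thin strings: a ThinString is a transparent wrapper holding a pointer to the internalized "actual" string, so the stub swaps the subject for the value at ThinString::kActualOffset and loops back to (1), exactly like the sliced case but with no offset to accumulate. A toy model of the unwrapping, with deliberately simplified types:

#include <cassert>

// Simplified shapes: a thin string points at its actual string, a sliced
// string at its parent plus an offset. The stub keeps peeling until it
// reaches a representation the generated matcher can consume directly.
struct Str {
  enum Kind { kSeq, kThin, kSliced };
  Kind kind;
  Str* target;   // actual (thin) or parent (sliced)
  int offset;    // sliced only
};

Str* Unwrap(Str* subject, int* total_offset) {
  for (;;) {
    switch (subject->kind) {
      case Str::kThin:                      // (8) thin: follow 'actual'
        subject = subject->target;
        break;
      case Str::kSliced:                    // (8) sliced: follow parent,
        *total_offset += subject->offset;   //     accumulating the offset
        subject = subject->target;
        break;
      default:
        return subject;                     // sequential/external: done
    }
  }
}

int main() {
  Str seq{Str::kSeq, nullptr, 0};
  Str thin{Str::kThin, &seq, 0};
  Str slice{Str::kSliced, &thin, 3};
  int off = 0;
  assert(Unwrap(&slice, &off) == &seq && off == 3);
}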
@@ -1932,189 +1888,6 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ bind(&exit_);
}
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
- Register slot) {
- __ dsrl(t0, slot, 32 - kPointerSizeLog2);
- __ Daddu(slot, feedback_vector, Operand(t0));
- __ ld(t0, FieldMemOperand(slot, FixedArray::kHeaderSize + kPointerSize));
- __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
- __ sd(t0, FieldMemOperand(slot, FixedArray::kHeaderSize + kPointerSize));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
- // a0 - number of arguments
- // a1 - function
- // a3 - slot id
- // a2 - vector
- // a4 - allocation site (loaded from vector[slot])
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
- __ Branch(miss, ne, a1, Operand(at));
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, a2, a3);
-
- __ mov(a2, a4);
- __ mov(a3, a1);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
- // a0 - number of arguments
- // a1 - function
- // a3 - slot id (Smi)
- // a2 - vector
- Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
- // The checks. First, does r1 match the recorded monomorphic target?
- __ dsrl(a4, a3, 32 - kPointerSizeLog2);
- __ Daddu(a4, a2, Operand(a4));
- __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
-
- // We don't know that we have a weak cell. We might have a private symbol
- // or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
- // computed, meaning that it can't appear to be a pointer. If the low bit is
- // 0, then hash is computed, but the 0 bit prevents the field from appearing
- // to be a pointer.
- STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
- WeakCell::kValueOffset &&
- WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
- __ ld(a5, FieldMemOperand(a4, WeakCell::kValueOffset));
- __ Branch(&extra_checks_or_miss, ne, a1, Operand(a5));
-
- // The compare above could have been a SMI/SMI comparison. Guard against this
- // convincing us that we have a monomorphic JSFunction.
- __ JumpIfSmi(a1, &extra_checks_or_miss);
-
- __ bind(&call_function);
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, a2, a3);
-
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
- tail_call_mode()),
- RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
-
- __ bind(&extra_checks_or_miss);
- Label uninitialized, miss, not_allocation_site;
-
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&call, eq, a4, Operand(at));
-
- // Verify that a4 contains an AllocationSite
- __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&not_allocation_site, ne, a5, Operand(at));
-
- HandleArrayCase(masm, &miss);
-
- __ bind(&not_allocation_site);
-
- // The following cases attempt to handle MISS cases without going to the
- // runtime.
- if (FLAG_trace_ic) {
- __ Branch(&miss);
- }
-
- __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
- __ Branch(&uninitialized, eq, a4, Operand(at));
-
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(a4);
- __ GetObjectType(a4, a5, a5);
- __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
- __ dsrl(a4, a3, 32 - kPointerSizeLog2);
- __ Daddu(a4, a2, Operand(a4));
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
-
- __ bind(&call);
- IncrementCallCount(masm, a2, a3);
-
- __ bind(&call_count_incremented);
-
- __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
-
- __ bind(&uninitialized);
-
- // We are going monomorphic, provided we actually have a JSFunction.
- __ JumpIfSmi(a1, &miss);
-
- // Goto miss case if we do not have a function.
- __ GetObjectType(a1, a4, a4);
- __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE));
-
- // Make sure the function is not the Array() function, which requires special
- // behavior on MISS.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a4);
- __ Branch(&miss, eq, a1, Operand(a4));
-
- // Make sure the function belongs to the same native context.
- __ ld(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
- __ ld(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
- __ ld(t1, NativeContextMemOperand());
- __ Branch(&miss, ne, t0, Operand(t1));
-
- // Store the function. Use a stub since we need a frame for allocation.
- // a2 - vector
- // a3 - slot
- // a1 - function
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateWeakCellStub create_stub(masm->isolate());
- __ SmiTag(a0);
- __ Push(a0);
- __ Push(a2, a3);
- __ Push(cp, a1);
- __ CallStub(&create_stub);
- __ Pop(cp, a1);
- __ Pop(a2, a3);
- __ Pop(a0);
- __ SmiUntag(a0);
- }
-
- __ Branch(&call_function);
-
- // We are here because tracing is on or we encountered a MISS case we can't
- // handle here.
- __ bind(&miss);
- GenerateMiss(masm);
-
- __ Branch(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve number of arguments as Smi.
- __ SmiTag(a0);
- __ Push(a0);
-
- // Push the receiver and the function and feedback info.
- __ Push(a1, a2, a3);
-
- // Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss);
-
- // Move result to a1 and exit the internal frame.
- __ mov(a1, v0);
-
- // Restore number of arguments.
- __ Pop(a0);
- __ SmiUntag(a0);
-}
-
-
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
@@ -2174,44 +1947,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(code_, &slow_case_);
- __ Branch(&slow_case_, hi, code_,
- Operand(Smi::FromInt(String::kMaxOneByteCharCode)));
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged one_byte char code.
- __ SmiScale(at, code_, kPointerSizeLog2);
- __ Daddu(result_, result_, at);
- __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&slow_case_, eq, result_, Operand(at));
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode);
- __ Move(result_, v0);
-
- call_helper.AfterCall(masm);
- __ Branch(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3131,12 +2866,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Daddu(sp, sp, a1);
}
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadFeedbackVector(a2);
- CallICStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -3491,515 +3220,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- ra : return address
- // -----------------------------------
- __ AssertFunction(a1);
-
- // Make a2 point to the JavaScript frame.
- __ mov(a2, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
- __ Branch(&ok, eq, a1, Operand(a3));
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have rest parameters (only possible if we have an
- // arguments adaptor frame below the function frame).
- Label no_rest_parameters;
- __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ ld(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&no_rest_parameters, ne, a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Check if the arguments adaptor frame contains more arguments than
- // specified by the function's internal formal parameter count.
- Label rest_parameters;
- __ SmiLoadUntag(
- a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3,
- FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Dsubu(a0, a0, Operand(a3));
- __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
-
- // Return an empty rest parameter array.
- __ bind(&no_rest_parameters);
- {
- // ----------- S t a t e -------------
- // -- cp : context
- // -- ra : return address
- // -----------------------------------
-
- // Allocate an empty rest parameter array.
- Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the rest parameter array in v0.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
- __ sd(a1, FieldMemOperand(v0, JSArray::kMapOffset));
- __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
- __ sd(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
- __ Move(a1, Smi::kZero);
- __ Ret(USE_DELAY_SLOT);
- __ sd(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); // In delay slot
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(Smi::FromInt(JSArray::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- }
- __ jmp(&done_allocate);
- }
-
- __ bind(&rest_parameters);
- {
- // Compute the pointer to the first rest parameter (skipping the receiver).
- __ Dlsa(a2, a2, a0, kPointerSizeLog2);
- __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
- 1 * kPointerSize));
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- a0 : number of rest parameters
- // -- a1 : function
- // -- a2 : pointer to first rest parameters
- // -- ra : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ li(a5, Operand(JSArray::kSize + FixedArray::kHeaderSize));
- __ Dlsa(a5, a5, a0, kPointerSizeLog2);
- __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Compute arguments.length in a4.
- __ SmiTag(a4, a0);
-
- // Setup the elements array in v0.
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
- __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
- {
- Label loop, done_loop;
- __ Dlsa(a1, a3, a0, kPointerSizeLog2);
- __ bind(&loop);
- __ Branch(&done_loop, eq, a1, Operand(a3));
- __ ld(at, MemOperand(a2, 0 * kPointerSize));
- __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
- __ Dsubu(a2, a2, Operand(1 * kPointerSize));
- __ Daddu(a3, a3, Operand(1 * kPointerSize));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
- // Setup the rest parameter array in a3.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
- __ sd(at, FieldMemOperand(a3, JSArray::kMapOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ sd(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
- __ sd(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ sd(a4, FieldMemOperand(a3, JSArray::kLengthOffset));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a3); // In delay slot
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ Branch(&too_big_for_new_space, gt, a5,
- Operand(kMaxRegularHeapObjectSize));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(a0);
- __ SmiTag(a5);
- __ Push(a0, a2, a5);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(a0, a2);
- __ SmiUntag(a0);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ bind(&too_big_for_new_space);
- __ Push(a1);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
- }
-}
-
-
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- ra : return address
- // -----------------------------------
- __ AssertFunction(a1);
-
- // Make t0 point to the JavaScript frame.
- __ mov(t0, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ ld(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ ld(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
- __ Branch(&ok, eq, a1, Operand(a3));
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2,
- FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Lsa(a3, t0, a2, kPointerSizeLog2);
- __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ SmiTag(a2);
-
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
- // t0 : Javascript frame pointer
- // Registers used over whole function:
- // a5 : arguments count (tagged)
- // a6 : mapped parameter count (tagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ld(a4, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a4, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&adaptor_frame, eq, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // No adaptor, parameter count = argument count.
- __ mov(a5, a2);
- __ Branch(USE_DELAY_SLOT, &try_allocate);
- __ mov(a6, a2); // In delay slot.
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(t2, a5, kPointerSizeLog2);
- __ Daddu(a4, a4, Operand(t2));
- __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // a5 = argument count (tagged)
- // a6 = parameter count (tagged)
- // Compute the mapped parameter count = min(a6, a5) in a6.
- __ mov(a6, a2);
- __ Branch(&try_allocate, le, a6, Operand(a5));
- __ mov(a6, a5);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- Label param_map_size;
- DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
- __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
- __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a6 == 0.
- __ SmiScale(t1, a6, kPointerSizeLog2);
- __ daddiu(t1, t1, kParameterMapHeaderSize);
- __ bind(&param_map_size);
-
- // 2. Backing store.
- __ SmiScale(t2, a5, kPointerSizeLog2);
- __ Daddu(t1, t1, Operand(t2));
- __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ Daddu(t1, t1, Operand(JSSloppyArgumentsObject::kSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(t1, v0, t1, a4, &runtime, NO_ALLOCATION_FLAGS);
-
- // v0 = address of new object(s) (tagged)
- // a2 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into a4.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ ld(a4, NativeContextMemOperand());
- Label skip2_ne, skip2_eq;
- __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
- __ ld(a4, MemOperand(a4, kNormalOffset));
- __ bind(&skip2_ne);
-
- __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
- __ ld(a4, MemOperand(a4, kAliasedOffset));
- __ bind(&skip2_eq);
-
- // v0 = address of new object (tagged)
- // a2 = argument count (smi-tagged)
- // a4 = address of arguments map (tagged)
- // a6 = mapped parameter count (tagged)
- __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
- __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- __ AssertNotSmi(a1);
- __ sd(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(a5);
- __ sd(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, a4 will point there, otherwise
- // it will point to the backing store.
- __ Daddu(a4, v0, Operand(JSSloppyArgumentsObject::kSize));
- __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // v0 = address of new object (tagged)
- // a2 = argument count (tagged)
- // a4 = address of parameter map or backing store (tagged)
- // a6 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- Label skip3;
- __ Branch(&skip3, ne, a6, Operand(Smi::kZero));
- // Move backing store address to a1, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(a1, a4);
- __ bind(&skip3);
-
- __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::kZero));
-
- __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
- __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
- __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
- __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Daddu(a5, a4, Operand(t2));
- __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
- __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(a5, a6);
- __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ Dsubu(t1, t1, Operand(a6));
- __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
- __ SmiScale(t2, a5, kPointerSizeLog2);
- __ Daddu(a1, a4, Operand(t2));
- __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
-
- // a1 = address of backing store (tagged)
- // a4 = address of parameter map (tagged)
- // a0 = temporary scratch (among others, for address calculation)
- // t1 = loop variable (tagged)
- // a7 = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
- __ SmiScale(a0, a5, kPointerSizeLog2);
- __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ Daddu(t2, a4, a0);
- __ sd(t1, MemOperand(t2));
- __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ Daddu(t2, a1, a0);
- __ sd(a7, MemOperand(t2));
- __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, a5, Operand(Smi::kZero));
-
- // Restore t1 = argument count (tagged).
- __ ld(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // v0 = address of new object (tagged)
- // a1 = address of backing store (tagged)
- // a5 = argument count (tagged)
- // a6 = mapped parameter count (tagged)
- // t1 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
- __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
- __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Dsubu(a3, a3, Operand(t2));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ Dsubu(a3, a3, Operand(kPointerSize));
- __ ld(a4, MemOperand(a3, 0));
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Daddu(t1, a1, Operand(t2));
- __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
- __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ Branch(&arguments_loop, lt, a6, Operand(a5));
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // a5 = argument count (tagged)
- __ bind(&runtime);
- __ Push(a1, a3, a5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- ra : return address
- // -----------------------------------
- __ AssertFunction(a1);
-
- // Make a2 point to the JavaScript frame.
- __ mov(a2, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
- __ Branch(&ok, eq, a1, Operand(a3));
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&arguments_adaptor, eq, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- {
- __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a0,
- FieldMemOperand(a4, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Dlsa(a2, a2, a0, kPointerSizeLog2);
- __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
- 1 * kPointerSize));
- }
- __ Branch(&arguments_done);
- __ bind(&arguments_adaptor);
- {
- __ SmiLoadUntag(
- a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Dlsa(a2, a3, a0, kPointerSizeLog2);
- __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
- 1 * kPointerSize));
- }
- __ bind(&arguments_done);
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- a0 : number of rest parameters
- // -- a1 : function
- // -- a2 : pointer to first rest parameters
- // -- ra : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ li(a5, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ Dlsa(a5, a5, a0, kPointerSizeLog2);
- __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Compute arguments.length in a4.
- __ SmiTag(a4, a0);
-
- // Setup the elements array in v0.
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
- __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
- {
- Label loop, done_loop;
- __ Dlsa(a1, a3, a0, kPointerSizeLog2);
- __ bind(&loop);
- __ Branch(&done_loop, eq, a1, Operand(a3));
- __ ld(at, MemOperand(a2, 0 * kPointerSize));
- __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
- __ Dsubu(a2, a2, Operand(1 * kPointerSize));
- __ Daddu(a3, a3, Operand(1 * kPointerSize));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
- // Setup the strict arguments object in a3.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
- __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
- __ sd(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
- __ sd(a4, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a3); // In delay slot
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ Branch(&too_big_for_new_space, gt, a5, Operand(kMaxRegularHeapObjectSize));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(a0);
- __ SmiTag(a5);
- __ Push(a0, a2, a5);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(a0, a2);
- __ SmiUntag(a0);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ bind(&too_big_for_new_space);
- __ Push(a1);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
DCHECK(static_cast<int>(offset) == offset);
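The three FastNew*ArgumentsStub generators deleted above (rest, sloppy, strict) do not lose their behavior: at this revision arguments-object creation moves to shared, platform-independent builtins, so the hand-written MIPS64 versions go away. The arithmetic the rest-parameter stub performed survives unchanged; as a sketch:

#include <algorithm>
#include <cassert>

// What the deleted stub computed before allocating: everything the caller
// passed beyond the formal parameter count becomes a rest element, and a
// shortfall yields an empty rest array rather than a negative count.
int RestParameterCount(int actual_argument_count, int formal_parameter_count) {
  return std::max(0, actual_argument_count - formal_parameter_count);
}

int main() {
  assert(RestParameterCount(5, 3) == 2);  // formals (a, b, c), 5 args: 2 rest
  assert(RestParameterCount(2, 3) == 0);  // too few args: empty rest array
}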
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 134fe4dd88..e7f6cb0a88 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -612,6 +612,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register index,
Register result,
Label* call_runtime) {
+ Label indirect_string_loaded;
+ __ bind(&indirect_string_loaded);
+
// Fetch the instance type of the receiver into result register.
__ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -622,18 +625,23 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&check_sequential, eq, at, Operand(zero_reg));
// Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ And(at, result, Operand(kSlicedNotConsMask));
- __ Branch(&cons_string, eq, at, Operand(zero_reg));
+ Label cons_string, thin_string;
+ __ And(at, result, Operand(kStringRepresentationMask));
+ __ Branch(&cons_string, eq, at, Operand(kConsStringTag));
+ __ Branch(&thin_string, eq, at, Operand(kThinStringTag));
// Handle slices.
- Label indirect_string_loaded;
__ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ dsra32(at, result, 0);
__ Daddu(index, index, at);
__ jmp(&indirect_string_loaded);
+ // Handle thin strings.
+ __ bind(&thin_string);
+ __ ld(string, FieldMemOperand(string, ThinString::kActualOffset));
+ __ jmp(&indirect_string_loaded);
+
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
@@ -645,10 +653,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(call_runtime, ne, result, Operand(at));
// Get the first of the two strings and load its instance type.
__ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
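The restructuring above is more than cosmetic: binding indirect_string_loaded at the top and jumping back to it after every unwrap turns the dispatch into a loop that reloads the instance type each iteration, so nested indirections (a thin string inside a cons, say) are peeled one layer at a time. A compact model of the resulting control flow:

#include <cassert>

// Each branch strips one indirection and re-dispatches on the new string's
// kind, mirroring the jump back to indirect_string_loaded. The cons case
// assumes the flat-cons precondition checked in the generated code (an
// empty second part), so following 'child' (the first part) is valid.
enum Kind { kSeq, kCons, kSliced, kThin };
struct S { Kind kind; S* child; int offset; };

S* LoadDirectString(S* s, int* index) {
  for (;;) {                          // indirect_string_loaded:
    switch (s->kind) {
      case kSliced: *index += s->offset; s = s->child; continue;
      case kThin:   s = s->child;                      continue;
      case kCons:   s = s->child;                      continue;
      default:      return s;         // sequential/external: fall through
    }
  }
}

int main() {
  S seq{kSeq, nullptr, 0};
  S thin{kThin, &seq, 0};
  S cons{kCons, &thin, 0};
  int index = 2;
  assert(LoadDirectString(&cons, &index) == &seq && index == 2);
}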
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index ea17124c63..8b762bd117 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -93,7 +93,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
+ Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
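CopyDoubleRegisters now shuttles register contents as Float64 instead of double. The motivation is bit-exactness: a value that round-trips through a C++ double can have its NaN payload canonicalized (a signaling NaN may come out quiet), while copying the raw 64-bit pattern is lossless across the deopt. A minimal model of such a wrapper (the real V8 type has more API; this is only the idea):

#include <cstdint>
#include <cstring>

// Bit-exact carrier for a double's payload: copy the 64 bits, never the
// floating-point value, so NaN payloads and the signaling bit survive.
struct Float64 {
  uint64_t bits;
  static Float64 FromBits(uint64_t b) { return Float64{b}; }
  double ToApproximateValue() const {   // lossy view, for display only
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d;
  }
};

int main() {
  const uint64_t kSignalingNaN = 0x7ff0000000000001ULL;
  Float64 f = Float64::FromBits(kSignalingNaN);
  return f.bits == kSignalingNaN ? 0 : 1;  // pattern preserved verbatim
}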
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 6f8a0979ff..5ce2bb0a4d 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -68,27 +68,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
@@ -133,15 +112,13 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a3};
+ Register registers[] = {a1, a0, a3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0, a3, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -177,6 +154,13 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: the target to call
+ // a2: start index (to support rest parameters)
+ Register registers[] = {a1, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -212,13 +196,12 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Descriptor::InitializePlatformSpecific( \
- CallInterfaceDescriptorData* data) { \
- data->InitializePlatformSpecific(0, nullptr, nullptr); \
- }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -407,6 +390,14 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a1, // loaded new FP
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index f022e87796..849327e60b 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -4040,17 +4040,16 @@ void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
or_(dst, dst, scratch);
}
-
-void MacroAssembler::DebugBreak() {
- PrepareCEntryArgs(0);
- PrepareCEntryFunction(
- ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
- CEntryStub ces(isolate(), 1);
- DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+ // Check whether we need to drop frames to restart a function on the stack.
+ ExternalReference restart_fp =
+ ExternalReference::debug_restart_fp_address(isolate());
+ li(a1, Operand(restart_fp));
+ ld(a1, MemOperand(a1));
+ Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+ ne, a1, Operand(zero_reg));
}
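MaybeDropFrames is the replacement for the deleted DebugBreak: rather than unconditionally calling into the runtime, generated code loads the isolate's restart-FP slot and diverts to the FrameDropperTrampoline only when the debugger has requested a live-edit frame restart (the slot holds zero otherwise, and the Jump is a conditional tail call). A sketch with the trampoline reduced to a callback:

#include <cstdint>
#include <cstdio>

// Stand-in for the debug_restart_fp_address external reference: the
// debugger stores the frame pointer to restart here, zero meaning "none".
static uintptr_t g_restart_fp = 0;

void MaybeDropFrames(void (*frame_dropper_trampoline)(uintptr_t)) {
  uintptr_t fp = g_restart_fp;       // li + ld in the generated prologue
  if (fp != 0) {                     // Jump(..., ne, a1, Operand(zero_reg))
    frame_dropper_trampoline(fp);    // conditional tail call in the asm
  }
}

static void Dropper(uintptr_t fp) {
  std::printf("restarting frame at %#lx\n", static_cast<unsigned long>(fp));
}

int main() {
  MaybeDropFrames(Dropper);          // no-op: nothing requested
  g_restart_fp = 0x7fff1000;
  MaybeDropFrames(Dropper);          // diverts to the dropper
}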
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -4928,32 +4927,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss) {
- // Get the prototype or initial map from the function.
- ld(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- LoadRoot(t8, Heap::kTheHoleValueRootIndex);
- Branch(miss, eq, result, Operand(t8));
-
- // If the function does not have an initial map, we're done.
- Label done;
- GetObjectType(result, scratch, scratch);
- Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
- // Get the prototype from the initial map.
- ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- bind(&done);
-}
-
-
void MacroAssembler::GetObjectType(Register object,
Register map,
Register type_reg) {
@@ -5653,7 +5626,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
void MacroAssembler::StubPrologue(StackFrame::Type type) {
- li(at, Operand(Smi::FromInt(type)));
+ li(at, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(at);
}
@@ -5686,8 +5659,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- ld(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
- ld(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+ ld(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+ ld(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
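EmitLoadFeedbackVector reflects the new object graph: the feedback vector hangs off the JSFunction through a Cell (JSFunction::kFeedbackVectorOffset, then Cell::kValueOffset) instead of being reached via the old LiteralsArray. A toy model of the two dependent loads:

#include <cassert>

// Toy layout mirroring the new chain: function -> feedback-vector Cell ->
// vector. Field names follow the diff; everything else is simplified.
struct FeedbackVector { int length; };
struct Cell { FeedbackVector* value; };
struct JSFunction { Cell* feedback_vector_cell; };

FeedbackVector* EmitLoadFeedbackVector(const JSFunction* fn) {
  return fn->feedback_vector_cell->value;  // two dependent loads, as in asm
}

int main() {
  FeedbackVector v{7};
  Cell c{&v};
  JSFunction f{&c};
  assert(EmitLoadFeedbackVector(&f)->length == 7);
}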
@@ -5713,7 +5686,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
stack_offset -= kPointerSize;
sd(fp, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
- li(t9, Operand(Smi::FromInt(type)));
+ li(t9, Operand(StackFrame::TypeToMarker(type)));
sd(t9, MemOperand(sp, stack_offset));
if (type == StackFrame::INTERNAL) {
DCHECK_EQ(stack_offset, kPointerSize);
@@ -5770,7 +5743,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
sd(ra, MemOperand(sp, 4 * kPointerSize));
sd(fp, MemOperand(sp, 3 * kPointerSize));
- li(at, Operand(Smi::FromInt(frame_type)));
+ li(at, Operand(StackFrame::TypeToMarker(frame_type)));
sd(at, MemOperand(sp, 2 * kPointerSize));
// Set up new frame pointer.
daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
@@ -5868,22 +5841,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
daddiu(sp, sp, 2 * kPointerSize);
}
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- // dsll(scratch1, length, kSmiTagSize);
- dsll32(scratch1, length, 0);
- LoadRoot(scratch2, map_index);
- sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
- li(scratch1, Operand(String::kEmptyHashField));
- sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
// Running on the real platform. Use the alignment as mandated by the local
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 126090d082..bfb1d520b4 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -1134,12 +1134,9 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
- // -------------------------------------------------------------------------
- // Debugger Support.
-
- void DebugBreak();
+ // Frame restart support.
+ void MaybeDropFrames();
- // -------------------------------------------------------------------------
// Exception handling.
// Push a new stack handler and link into stack handler chain.
@@ -1163,14 +1160,6 @@ class MacroAssembler: public Assembler {
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss);
-
void GetObjectType(Register function,
Register map,
Register type_reg);
@@ -1877,12 +1866,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
InvokeFlag flag,
const CallWrapper& call_wrapper);
- void InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object, Register scratch,
Condition cond, // ne for new space, eq otherwise.
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 591ddaf3a1..c0dab739a8 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -1626,20 +1626,92 @@ void Simulator::DieOrDebug() {
}
}
-
-void Simulator::TraceRegWr(int64_t value) {
+void Simulator::TraceRegWr(int64_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
- SNPrintF(trace_buf_, "%016" PRIx64 " ", value);
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
+ break;
+ case DWORD:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64
+ " uint64:%" PRIu64,
+ value, icount_, value, value);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e",
+ v.fmt_int64, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e",
+ v.fmt_int64, icount_, v.fmt_double);
+ break;
+ case FLOAT_DOUBLE:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e dbl:%e",
+ v.fmt_int64, icount_, v.fmt_float[0], v.fmt_double);
+ break;
+ case WORD_DWORD:
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32 " int64:%" PRId64 " uint64:%" PRIu64,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0],
+ v.fmt_int64, v.fmt_int64);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
}
-
// TODO(plind): consider making icount_ printing a flag option.
-void Simulator::TraceMemRd(int64_t addr, int64_t value) {
+void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
- SNPrintF(trace_buf_,
- "%016" PRIx64 " <-- [%016" PRIx64 " ] (%" PRId64 " )", value,
- addr, icount_);
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") int32:%" PRId32 " uint32:%" PRIu32,
+ v.fmt_int64, addr, icount_, v.fmt_int32[0], v.fmt_int32[0]);
+ break;
+ case DWORD:
+ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") int64:%" PRId64 " uint64:%" PRIu64,
+ value, addr, icount_, value, value);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") flt:%e",
+ v.fmt_int64, addr, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") dbl:%e",
+ v.fmt_int64, addr, icount_, v.fmt_double);
+ break;
+ case FLOAT_DOUBLE:
+ SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") flt:%e dbl:%e",
+ v.fmt_int64, addr, icount_, v.fmt_float[0], v.fmt_double);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
}
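Both TraceRegWr and TraceMemRd now view the same 64-bit register value
through a union, so a single stored value can be printed as int32, int64,
float, or double without reloading it. The trick in isolation (type punning
through a union is the simulator's own idiom; strictly speaking it is
implementation-defined behavior in C++):

    #include <cstdint>
    #include <cstdio>

    // One 64-bit payload, reinterpreted under each type the trace prints.
    union TraceView {
      int64_t as_int64;
      int32_t as_int32[2];
      float as_float[2];
      double as_double;
    };

    void PrintAllViews(int64_t value) {
      TraceView v;
      v.as_int64 = value;
      std::printf("int32:%d int64:%lld flt:%e dbl:%e\n", v.as_int32[0],
                  static_cast<long long>(v.as_int64), v.as_float[0],
                  v.as_double);
    }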
@@ -1648,22 +1720,27 @@ void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
switch (t) {
case BYTE:
- SNPrintF(trace_buf_, " %02x --> [%016" PRIx64 " ]",
- static_cast<int8_t>(value), addr);
+ SNPrintF(trace_buf_, " %02" PRIx8 " --> [%016" PRIx64
+ "] (%" PRId64 ")",
+ static_cast<uint8_t>(value), addr, icount_);
break;
case HALF:
- SNPrintF(trace_buf_, " %04x --> [%016" PRIx64 " ]",
- static_cast<int16_t>(value), addr);
+ SNPrintF(trace_buf_, " %04" PRIx16 " --> [%016" PRIx64
+ "] (%" PRId64 ")",
+ static_cast<uint16_t>(value), addr, icount_);
break;
case WORD:
- SNPrintF(trace_buf_, " %08x --> [%016" PRIx64 " ]",
- static_cast<int32_t>(value), addr);
+ SNPrintF(trace_buf_,
+ " %08" PRIx32 " --> [%016" PRIx64 "] (%" PRId64 ")",
+ static_cast<uint32_t>(value), addr, icount_);
break;
case DWORD:
SNPrintF(trace_buf_,
- "%016" PRIx64 " --> [%016" PRIx64 " ] (%" PRId64 " )",
+ "%016" PRIx64 " --> [%016" PRIx64 "] (%" PRId64 " )",
value, addr, icount_);
break;
+ default:
+ UNREACHABLE();
}
}
}
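The rewritten TraceMemWr formats switch from bare %02x/%04x/%08x conversions
to the width-exact <cinttypes> macros (PRIx8, PRIx16, PRIx32, PRIx64), which
keeps the trace portable across data models instead of assuming int widths.
Usage in miniature:

    #include <cinttypes>
    #include <cstdio>

    void Demo(uint8_t byte, uint64_t addr) {
      // Each macro expands to the right length modifier for its exact type.
      std::printf("  %02" PRIx8 " --> [%016" PRIx64 "]\n", byte, addr);
    }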
@@ -1671,7 +1748,7 @@ void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
// TODO(plind): sign-extend and zero-extend not implemented properly
// on all the ReadXX functions; I don't think reinterpret_cast does it.
-int32_t Simulator::ReadW(int64_t addr, Instruction* instr) {
+int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
if (addr >= 0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
@@ -1681,7 +1758,7 @@ int32_t Simulator::ReadW(int64_t addr, Instruction* instr) {
}
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ TraceMemRd(addr, static_cast<int64_t>(*ptr), t);
return *ptr;
}
PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
@@ -1701,7 +1778,7 @@ uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
}
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
- TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ TraceMemRd(addr, static_cast<int64_t>(*ptr), WORD);
return *ptr;
}
PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
@@ -2455,98 +2532,104 @@ void Simulator::DecodeTypeRegisterSRsType() {
result = lower;
break;
}
- set_fpu_register_float(fd_reg(), result);
+ SetFPUFloatResult(fd_reg(), result);
if (result != fs) {
set_fcsr_bit(kFCSRInexactFlagBit, true);
}
break;
}
case ADD_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(),
FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
fs, ft));
break;
case SUB_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(),
FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
fs, ft));
break;
case MADDF_S:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), std::fma(fs, ft, fd));
+ SetFPUFloatResult(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_S:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), std::fma(-fs, ft, fd));
+ SetFPUFloatResult(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(),
FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
fs, ft));
break;
case DIV_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(),
FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
fs, ft));
break;
case ABS_S:
- set_fpu_register_float(
- fd_reg(),
- FPUCanonalizeOperation([](float fs) { return FPAbs(fs); }, fs));
+ SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
+ [](float fs) { return FPAbs(fs); }, fs));
break;
case MOV_S:
- set_fpu_register_float(fd_reg(), fs);
+ SetFPUFloatResult(fd_reg(), fs);
break;
case NEG_S:
- set_fpu_register_float(
- fd_reg(), FPUCanonalizeOperation([](float src) { return -src; },
- KeepSign::yes, fs));
+ SetFPUFloatResult(fd_reg(),
+ FPUCanonalizeOperation([](float src) { return -src; },
+ KeepSign::yes, fs));
break;
case SQRT_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(),
FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs));
break;
case RSQRT_S:
- set_fpu_register_float(
+ SetFPUFloatResult(
fd_reg(), FPUCanonalizeOperation(
[](float src) { return 1.0 / std::sqrt(src); }, fs));
break;
case RECIP_S:
- set_fpu_register_float(
- fd_reg(),
- FPUCanonalizeOperation([](float src) { return 1.0 / src; }, fs));
+ SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
+ [](float src) { return 1.0 / src; }, fs));
break;
case C_F_D:
set_fcsr_bit(fcsr_cc, false);
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_EQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_UEQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_OLT_D:
set_fcsr_bit(fcsr_cc, (fs < ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_ULT_D:
set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_OLE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_ULE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case CVT_D_S:
- set_fpu_register_double(fd_reg(), static_cast<double>(fs));
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(fs));
break;
case CLASS_S: { // Mips64r6 instruction
// Convert float input to uint32_t for easier bit manipulation
@@ -2609,15 +2692,14 @@ void Simulator::DecodeTypeRegisterSRsType() {
DCHECK(result != 0);
fResult = bit_cast<float>(result);
- set_fpu_register_float(fd_reg(), fResult);
-
+ SetFPUFloatResult(fd_reg(), fResult);
break;
}
case CVT_L_S: {
float rounded;
int64_t result;
round64_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register(fd_reg(), result);
+ SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -2627,7 +2709,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
float rounded;
int32_t result;
round_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -2636,7 +2718,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case TRUNC_W_S: { // Truncate single to word (round towards 0).
float rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -2644,7 +2726,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case TRUNC_L_S: { // Mips64r2 instruction.
float rounded = trunc(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg(), result);
+ SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -2658,7 +2740,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
// round to the even one.
result--;
}
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -2673,7 +2755,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
result--;
}
int64_t i64 = static_cast<int64_t>(result);
- set_fpu_register(fd_reg(), i64);
+ SetFPUResult(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -2682,7 +2764,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case FLOOR_L_S: { // Mips64r2 instruction.
float rounded = floor(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg(), result);
+ SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -2692,7 +2774,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
{
float rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -2701,7 +2783,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
{
float rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_invalid_result(fs, rounded);
}
@@ -2709,7 +2791,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case CEIL_L_S: { // Mips64r2 instruction.
float rounded = ceil(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg(), result);
+ SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -2717,47 +2799,47 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
case MINA:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), FPUMinA(ft, fs));
+ SetFPUFloatResult(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), FPUMaxA(ft, fs));
+ SetFPUFloatResult(fd_reg(), FPUMaxA(ft, fs));
break;
case MIN:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), FPUMin(ft, fs));
+ SetFPUFloatResult(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), FPUMax(ft, fs));
+ SetFPUFloatResult(fd_reg(), FPUMax(ft, fs));
break;
case SEL:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
+ SetFPUFloatResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
break;
case SELEQZ_C:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), (ft_int & 0x1) == 0
- ? get_fpu_register_float(fs_reg())
- : 0.0);
+ SetFPUFloatResult(
+ fd_reg(),
+ (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg()) : 0.0);
break;
case SELNEZ_C:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), (ft_int & 0x1) != 0
- ? get_fpu_register_float(fs_reg())
- : 0.0);
+ SetFPUFloatResult(
+ fd_reg(),
+ (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg()) : 0.0);
break;
case MOVZ_C: {
DCHECK(kArchVariant == kMips64r2);
if (rt() == 0) {
- set_fpu_register_float(fd_reg(), fs);
+ SetFPUFloatResult(fd_reg(), fs);
}
break;
}
case MOVN_C: {
DCHECK(kArchVariant == kMips64r2);
if (rt() != 0) {
- set_fpu_register_float(fd_reg(), fs);
+ SetFPUFloatResult(fd_reg(), fs);
}
break;
}
@@ -2768,10 +2850,10 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (instr_.Bit(16)) { // Read Tf bit.
// MOVT.D
- if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
+ if (test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs);
} else {
// MOVF.D
- if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
+ if (!test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs);
}
break;
}
@@ -2826,7 +2908,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
result = lower;
break;
}
- set_fpu_register_double(fd_reg(), result);
+ SetFPUDoubleResult(fd_reg(), result);
if (result != fs) {
set_fcsr_bit(kFCSRInexactFlagBit, true);
}
@@ -2834,27 +2916,27 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
case SEL:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
+ SetFPUDoubleResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
break;
case SELEQZ_C:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
+ SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
break;
case SELNEZ_C:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
+ SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
break;
case MOVZ_C: {
DCHECK(kArchVariant == kMips64r2);
if (rt() == 0) {
- set_fpu_register_double(fd_reg(), fs);
+ SetFPUDoubleResult(fd_reg(), fs);
}
break;
}
case MOVN_C: {
DCHECK(kArchVariant == kMips64r2);
if (rt() != 0) {
- set_fpu_register_double(fd_reg(), fs);
+ SetFPUDoubleResult(fd_reg(), fs);
}
break;
}
@@ -2864,115 +2946,121 @@ void Simulator::DecodeTypeRegisterDRsType() {
ft_cc = get_fcsr_condition_bit(ft_cc);
if (instr_.Bit(16)) { // Read Tf bit.
// MOVT.D
- if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
+ if (test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs);
} else {
// MOVF.D
- if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
+ if (!test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs);
}
break;
}
case MINA:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), FPUMinA(ft, fs));
+ SetFPUDoubleResult(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
+ SetFPUDoubleResult(fd_reg(), FPUMaxA(ft, fs));
break;
case MIN:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), FPUMin(ft, fs));
+ SetFPUDoubleResult(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
+ SetFPUDoubleResult(fd_reg(), FPUMax(ft, fs));
break;
case ADD_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation(
[](double lhs, double rhs) { return lhs + rhs; }, fs, ft));
break;
case SUB_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation(
[](double lhs, double rhs) { return lhs - rhs; }, fs, ft));
break;
case MADDF_D:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), std::fma(fs, ft, fd));
+ SetFPUDoubleResult(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_D:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), std::fma(-fs, ft, fd));
+ SetFPUDoubleResult(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation(
[](double lhs, double rhs) { return lhs * rhs; }, fs, ft));
break;
case DIV_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation(
[](double lhs, double rhs) { return lhs / rhs; }, fs, ft));
break;
case ABS_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation([](double fs) { return FPAbs(fs); }, fs));
break;
case MOV_D:
- set_fpu_register_double(fd_reg(), fs);
+ SetFPUDoubleResult(fd_reg(), fs);
break;
case NEG_D:
- set_fpu_register_double(
- fd_reg(), FPUCanonalizeOperation([](double src) { return -src; },
- KeepSign::yes, fs));
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation([](double src) { return -src; },
+ KeepSign::yes, fs));
break;
case SQRT_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(),
FPUCanonalizeOperation([](double fs) { return std::sqrt(fs); }, fs));
break;
case RSQRT_D:
- set_fpu_register_double(
+ SetFPUDoubleResult(
fd_reg(), FPUCanonalizeOperation(
[](double fs) { return 1.0 / std::sqrt(fs); }, fs));
break;
case RECIP_D:
- set_fpu_register_double(
- fd_reg(),
- FPUCanonalizeOperation([](double fs) { return 1.0 / fs; }, fs));
+ SetFPUDoubleResult(fd_reg(), FPUCanonalizeOperation(
+ [](double fs) { return 1.0 / fs; }, fs));
break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_EQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_UEQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_OLT_D:
set_fcsr_bit(fcsr_cc, (fs < ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_ULT_D:
set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_OLE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case C_ULE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
case CVT_W_D: { // Convert double to word.
double rounded;
int32_t result;
round_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
}
@@ -2987,7 +3075,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
// round to the even one.
result--;
}
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_invalid_result(fs, rounded);
}
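The ROUND_W_* and ROUND_L_* cases all share the tie-breaking idiom visible
above: round half up first, then step back by one when the fraction was
exactly .5 and the candidate is odd, which yields IEEE round-half-to-even.
A self-contained version of the same shape:

    #include <cmath>
    #include <cstdint>

    int32_t RoundHalfToEven(double fs) {
      double rounded = std::floor(fs + 0.5);  // round half up
      int32_t result = static_cast<int32_t>(rounded);
      if ((fs - std::floor(fs)) == 0.5 && (result & 1) != 0) {
        result--;  // exact tie and an odd candidate: round to the even one
      }
      return result;
    }
    // RoundHalfToEven(2.5) == 2, RoundHalfToEven(3.5) == 4.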
@@ -2996,7 +3084,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
{
double rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_invalid_result(fs, rounded);
}
@@ -3005,7 +3093,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
{
double rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_invalid_result(fs, rounded);
}
@@ -3014,19 +3102,19 @@ void Simulator::DecodeTypeRegisterDRsType() {
{
double rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg(), result);
+ SetFPUWordResult2(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_invalid_result(fs, rounded);
}
} break;
case CVT_S_D: // Convert double to float (single).
- set_fpu_register_float(fd_reg(), static_cast<float>(fs));
+ SetFPUFloatResult(fd_reg(), static_cast<float>(fs));
break;
case CVT_L_D: { // Mips64r2: Truncate double to 64-bit long-word.
double rounded;
int64_t result;
round64_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register(fd_reg(), result);
+ SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -3041,7 +3129,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
result--;
}
int64_t i64 = static_cast<int64_t>(result);
- set_fpu_register(fd_reg(), i64);
+ SetFPUResult(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -3050,7 +3138,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case TRUNC_L_D: { // Mips64r2 instruction.
double rounded = trunc(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg(), result);
+ SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -3059,7 +3147,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case FLOOR_L_D: { // Mips64r2 instruction.
double rounded = floor(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg(), result);
+ SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -3068,7 +3156,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case CEIL_L_D: { // Mips64r2 instruction.
double rounded = ceil(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg(), result);
+ SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
}
@@ -3135,12 +3223,12 @@ void Simulator::DecodeTypeRegisterDRsType() {
DCHECK(result != 0);
dResult = bit_cast<double>(result);
- set_fpu_register_double(fd_reg(), dResult);
-
+ SetFPUDoubleResult(fd_reg(), dResult);
break;
}
case C_F_D: {
set_fcsr_bit(fcsr_cc, false);
+ TraceRegWr(test_fcsr_bit(fcsr_cc));
break;
}
default:
@@ -3156,83 +3244,83 @@ void Simulator::DecodeTypeRegisterWRsType() {
switch (instr_.FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
alu_out = get_fpu_register_signed_word(fs_reg());
- set_fpu_register_float(fd_reg(), static_cast<float>(alu_out));
+ SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
break;
case CVT_D_W: // Convert word to double.
alu_out = get_fpu_register_signed_word(fs_reg());
- set_fpu_register_double(fd_reg(), static_cast<double>(alu_out));
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(alu_out));
break;
case CMP_AF:
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult2(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
}
break;
case CMP_EQ:
if (fs == ft) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult2(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
}
break;
case CMP_UEQ:
if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult2(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
}
break;
case CMP_LT:
if (fs < ft) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult2(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
}
break;
case CMP_ULT:
if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult2(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
}
break;
case CMP_LE:
if (fs <= ft) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult2(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
}
break;
case CMP_ULE:
if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult2(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
}
break;
case CMP_OR:
if (!std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult2(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
}
break;
case CMP_UNE:
if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult2(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
}
break;
case CMP_NE:
if (fs != ft) {
- set_fpu_register_word(fd_reg(), -1);
+ SetFPUWordResult2(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg(), 0);
+ SetFPUWordResult2(fd_reg(), 0);
}
break;
default:
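Unlike the r2-era C.cond.fmt cases earlier, which set a condition bit in the
FCSR, the r6 CMP_* cases above write a full-width mask into the destination
FPU register: -1 (all ones) when the predicate holds, 0 otherwise, so a
following SEL can consume it directly. The semantics in two lines:

    #include <cmath>
    #include <cstdint>

    // r6-style compares produce masks, not condition-code bits.
    int32_t CmpLt(float fs, float ft) { return fs < ft ? -1 : 0; }
    int32_t CmpUn(float fs, float ft) {
      return (std::isnan(fs) || std::isnan(ft)) ? -1 : 0;
    }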
@@ -3248,83 +3336,83 @@ void Simulator::DecodeTypeRegisterLRsType() {
switch (instr_.FunctionFieldRaw()) {
case CVT_D_L: // Mips32r2 instruction.
i64 = get_fpu_register(fs_reg());
- set_fpu_register_double(fd_reg(), static_cast<double>(i64));
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(i64));
break;
case CVT_S_L:
i64 = get_fpu_register(fs_reg());
- set_fpu_register_float(fd_reg(), static_cast<float>(i64));
+ SetFPUFloatResult(fd_reg(), static_cast<float>(i64));
break;
case CMP_AF:
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_EQ:
if (fs == ft) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_UEQ:
if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_LT:
if (fs < ft) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_ULT:
if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_LE:
if (fs <= ft) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_ULE:
if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_OR:
if (!std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_UNE:
if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
case CMP_NE:
if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
- set_fpu_register(fd_reg(), -1);
+ SetFPUResult(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg(), 0);
+ SetFPUResult(fd_reg(), 0);
}
break;
default:
@@ -3343,17 +3431,18 @@ void Simulator::DecodeTypeRegisterCOP1() {
case CFC1:
// At the moment only FCSR is supported.
DCHECK(fs_reg() == kFCSRRegister);
- set_register(rt_reg(), FCSR_);
+ SetResult(rt_reg(), FCSR_);
break;
case MFC1:
set_register(rt_reg(),
static_cast<int64_t>(get_fpu_register_word(fs_reg())));
+ TraceRegWr(get_register(rt_reg()), WORD_DWORD);
break;
case DMFC1:
- set_register(rt_reg(), get_fpu_register(fs_reg()));
+ SetResult(rt_reg(), get_fpu_register(fs_reg()));
break;
case MFHC1:
- set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+ SetResult(rt_reg(), get_fpu_register_hi_word(fs_reg()));
break;
case CTC1: {
// At the moment only FCSR is supported.
@@ -3365,18 +3454,21 @@ void Simulator::DecodeTypeRegisterCOP1() {
DCHECK(kArchVariant == kMips64r2);
FCSR_ = reg & ~kFCSRNaN2008FlagMask;
}
+ TraceRegWr(FCSR_);
break;
}
case MTC1:
// Hardware writes upper 32-bits to zero on mtc1.
set_fpu_register_hi_word(fs_reg(), 0);
set_fpu_register_word(fs_reg(), static_cast<int32_t>(rt()));
+ TraceRegWr(get_fpu_register(fs_reg()), FLOAT_DOUBLE);
break;
case DMTC1:
- set_fpu_register(fs_reg(), rt());
+ SetFPUResult2(fs_reg(), rt());
break;
case MTHC1:
set_fpu_register_hi_word(fs_reg(), static_cast<int32_t>(rt()));
+ TraceRegWr(get_fpu_register(fs_reg()), DOUBLE);
break;
case S:
DecodeTypeRegisterSRsType();
@@ -3404,7 +3496,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
fr = get_fpu_register_float(fr_reg());
fs = get_fpu_register_float(fs_reg());
ft = get_fpu_register_float(ft_reg());
- set_fpu_register_float(fd_reg(), fs * ft + fr);
+ SetFPUFloatResult(fd_reg(), fs * ft + fr);
break;
}
case MSUB_S: {
@@ -3413,7 +3505,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
fr = get_fpu_register_float(fr_reg());
fs = get_fpu_register_float(fs_reg());
ft = get_fpu_register_float(ft_reg());
- set_fpu_register_float(fd_reg(), fs * ft - fr);
+ SetFPUFloatResult(fd_reg(), fs * ft - fr);
break;
}
case MADD_D: {
@@ -3422,7 +3514,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
fr = get_fpu_register_double(fr_reg());
fs = get_fpu_register_double(fs_reg());
ft = get_fpu_register_double(ft_reg());
- set_fpu_register_double(fd_reg(), fs * ft + fr);
+ SetFPUDoubleResult(fd_reg(), fs * ft + fr);
break;
}
case MSUB_D: {
@@ -3431,7 +3523,7 @@ void Simulator::DecodeTypeRegisterCOP1X() {
fr = get_fpu_register_double(fr_reg());
fs = get_fpu_register_double(fs_reg());
ft = get_fpu_register_double(ft_reg());
- set_fpu_register_double(fd_reg(), fs * ft - fr);
+ SetFPUDoubleResult(fd_reg(), fs * ft - fr);
break;
}
default:
@@ -3449,11 +3541,11 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
switch (instr_.FunctionFieldRaw()) {
case SELEQZ_S:
DCHECK(kArchVariant == kMips64r6);
- set_register(rd_reg(), rt() == 0 ? rs() : 0);
+ SetResult(rd_reg(), rt() == 0 ? rs() : 0);
break;
case SELNEZ_S:
DCHECK(kArchVariant == kMips64r6);
- set_register(rd_reg(), rt() != 0 ? rs() : 0);
+ SetResult(rd_reg(), rt() != 0 ? rs() : 0);
break;
case JR: {
int64_t next_pc = rs();
@@ -3636,10 +3728,10 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
} else {
switch (sa()) {
case MUL_OP:
- set_register(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
break;
case MUH_OP:
- set_register(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
+ SetResult(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
break;
default:
UNIMPLEMENTED_MIPS();
@@ -3657,10 +3749,10 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
} else {
switch (sa()) {
case MUL_OP:
- set_register(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
break;
case MUH_OP:
- set_register(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
+ SetResult(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
break;
default:
UNIMPLEMENTED_MIPS();
@@ -3675,10 +3767,10 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
} else {
switch (sa()) {
case MUL_OP:
- set_register(rd_reg(), rs() * rt());
+ SetResult(rd_reg(), rs() * rt());
break;
case MUH_OP:
- set_register(rd_reg(), MultiplyHighSigned(rs(), rt()));
+ SetResult(rd_reg(), MultiplyHighSigned(rs(), rt()));
break;
default:
UNIMPLEMENTED_MIPS();
@@ -3711,16 +3803,16 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
switch (sa()) {
case DIV_OP:
if (rs() == int_min_value && rt() == -1) {
- set_register(rd_reg(), int_min_value);
+ SetResult(rd_reg(), int_min_value);
} else if (rt() != 0) {
- set_register(rd_reg(), rs() / rt());
+ SetResult(rd_reg(), rs() / rt());
}
break;
case MOD_OP:
if (rs() == int_min_value && rt() == -1) {
- set_register(rd_reg(), 0);
+ SetResult(rd_reg(), 0);
} else if (rt() != 0) {
- set_register(rd_reg(), rs() % rt());
+ SetResult(rd_reg(), rs() % rt());
}
break;
default:
@@ -3741,12 +3833,12 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
switch (sa()) {
case DIV_OP:
if (rt_u_32 != 0) {
- set_register(rd_reg(), rs_u_32 / rt_u_32);
+ SetResult(rd_reg(), rs_u_32 / rt_u_32);
}
break;
case MOD_OP:
if (rt_u() != 0) {
- set_register(rd_reg(), rs_u_32 % rt_u_32);
+ SetResult(rd_reg(), rs_u_32 % rt_u_32);
}
break;
default:
@@ -3770,12 +3862,12 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
switch (instr_.SaValue()) {
case DIV_OP:
if (rt_u() != 0) {
- set_register(rd_reg(), rs_u() / rt_u());
+ SetResult(rd_reg(), rs_u() / rt_u());
}
break;
case MOD_OP:
if (rt_u() != 0) {
- set_register(rd_reg(), rs_u() % rt_u());
+ SetResult(rd_reg(), rs_u() % rt_u());
}
break;
default:
@@ -3892,9 +3984,9 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
uint32_t cc = instr_.FBccValue();
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
if (instr_.Bit(16)) { // Read Tf bit.
- if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
+ if (test_fcsr_bit(fcsr_cc)) SetResult(rd_reg(), rs());
} else {
- if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
+ if (!test_fcsr_bit(fcsr_cc)) SetResult(rd_reg(), rs());
}
break;
}
@@ -4689,10 +4781,12 @@ void Simulator::DecodeTypeImmediate() {
}
case LWC1:
set_fpu_register(ft_reg, kFPUInvalidResult); // Trash upper 32 bits.
- set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr_.instr()));
+ set_fpu_register_word(ft_reg,
+ ReadW(rs + se_imm16, instr_.instr(), FLOAT_DOUBLE));
break;
case LDC1:
set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr_.instr()));
+ TraceMemRd(addr, get_fpu_register(ft_reg), DOUBLE);
break;
case SWC1: {
int32_t alu_out_32 = static_cast<int32_t>(get_fpu_register(ft_reg));
@@ -4701,6 +4795,7 @@ void Simulator::DecodeTypeImmediate() {
}
case SDC1:
WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr());
+ TraceMemWr(rs + se_imm16, get_fpu_register(ft_reg), DWORD);
break;
// ------------- PC-Relative instructions.
case PCREL: {
@@ -4764,7 +4859,7 @@ void Simulator::DecodeTypeImmediate() {
break;
}
}
- set_register(rs_reg, alu_out);
+ SetResult(rs_reg, alu_out);
break;
}
default:
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index df98465c24..6c41ae111a 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -303,6 +303,18 @@ class Simulator {
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
+ // Helpers for data value tracing.
+ enum TraceType {
+ BYTE,
+ HALF,
+ WORD,
+ DWORD,
+ FLOAT,
+ DOUBLE,
+ FLOAT_DOUBLE,
+ WORD_DWORD
+ };
+
// Read and write memory.
inline uint32_t ReadBU(int64_t addr);
inline int32_t ReadB(int64_t addr);
@@ -316,7 +328,7 @@ class Simulator {
inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
inline uint32_t ReadWU(int64_t addr, Instruction* instr);
- inline int32_t ReadW(int64_t addr, Instruction* instr);
+ inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
inline int64_t Read2W(int64_t addr, Instruction* instr);
inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
@@ -327,18 +339,9 @@ class Simulator {
// Helper for debugging memory access.
inline void DieOrDebug();
- // Helpers for data value tracing.
- enum TraceType {
- BYTE,
- HALF,
- WORD,
- DWORD
- // DFLOAT - Floats may have printing issues due to paired lwc1's
- };
-
- void TraceRegWr(int64_t value);
+ void TraceRegWr(int64_t value, TraceType t = DWORD);
void TraceMemWr(int64_t addr, int64_t value, TraceType t);
- void TraceMemRd(int64_t addr, int64_t value);
+ void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
// Operations depending on endianness.
// Get Double Higher / Lower word.
@@ -396,6 +399,36 @@ class Simulator {
TraceRegWr(alu_out);
}
+ inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
+ set_fpu_register_word(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), WORD);
+ }
+
+ inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
+ set_fpu_register_word(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg));
+ }
+
+ inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
+ set_fpu_register(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg));
+ }
+
+ inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
+ set_fpu_register(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+ }
+
+ inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
+ set_fpu_register_float(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), FLOAT);
+ }
+
+ inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
+ set_fpu_register_double(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+ }
+
void DecodeTypeImmediate();
void DecodeTypeJump();
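Every Set*Result helper above follows one pattern: perform the architectural
write through the existing setter, then trace what the register now holds
(re-read via get_fpu_register) under the matching TraceType. A toy model of
that pattern, with invented names:

    #include <cstdint>
    #include <cstdio>

    struct MiniSim {
      int64_t fpu[32] = {};
      enum TraceType { WORD, DWORD, FLOAT, DOUBLE };

      void TraceRegWr(int64_t bits, TraceType t = DWORD) {
        std::printf("reg <- %016llx (type %d)\n",
                    static_cast<unsigned long long>(bits), t);
      }
      // Write first, then log the post-write register contents, so the
      // trace always reflects what the simulated hardware actually holds.
      void SetFPUResult(int fd, int64_t value) {
        fpu[fd] = value;
        TraceRegWr(fpu[fd]);
      }
    };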
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index bffc8bdb3e..be9c0f2de6 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -5,7 +5,9 @@
#ifndef V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
#define V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
+#include "src/assembler-inl.h"
#include "src/objects-body-descriptors.h"
+#include "src/transitions.h"
namespace v8 {
namespace internal {
@@ -361,8 +363,6 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
STATIC_ASSERT(kSourcePositionTableOffset + kPointerSize ==
kTypeFeedbackInfoOffset);
STATIC_ASSERT(kTypeFeedbackInfoOffset + kPointerSize ==
- kProtectedInstructionOffset);
- STATIC_ASSERT(kProtectedInstructionOffset + kPointerSize ==
kNextCodeLinkOffset);
static bool IsValidSlot(HeapObject* obj, int offset) {
@@ -440,6 +440,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
return ReturnType();
case kConsStringTag:
return Op::template apply<ConsString::BodyDescriptor>(p1, p2, p3);
+ case kThinStringTag:
+ return Op::template apply<ThinString::BodyDescriptor>(p1, p2, p3);
case kSlicedStringTag:
return Op::template apply<SlicedString::BodyDescriptor>(p1, p2, p3);
case kExternalStringTag:
@@ -465,6 +467,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_TYPE:
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -555,7 +558,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
- case SIMD128_VALUE_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
case FREE_SPACE_TYPE:
diff --git a/deps/v8/src/objects-body-descriptors.h b/deps/v8/src/objects-body-descriptors.h
index a1c3634bd7..91cb8883be 100644
--- a/deps/v8/src/objects-body-descriptors.h
+++ b/deps/v8/src/objects-body-descriptors.h
@@ -99,7 +99,7 @@ class FixedBodyDescriptor final : public BodyDescriptorBase {
template <typename StaticVisitor>
static inline void IterateBody(HeapObject* obj, int object_size) {
- IterateBody<StaticVisitor>(obj);
+ IterateBody(obj);
}
};
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 986970444f..8a2b9eb2ad 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -4,13 +4,19 @@
#include "src/objects.h"
+#include "src/assembler-inl.h"
#include "src/bootstrapper.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/field-type.h"
+#include "src/layout-descriptor.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
+#include "src/objects/module-info.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
+#include "src/transitions.h"
namespace v8 {
namespace internal {
@@ -64,9 +70,6 @@ void HeapObject::HeapObjectVerify() {
case MUTABLE_HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberVerify();
break;
- case SIMD128_VALUE_TYPE:
- Simd128Value::cast(this)->Simd128ValueVerify();
- break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
break;
@@ -197,6 +200,9 @@ void HeapObject::HeapObjectVerify() {
case JS_STRING_ITERATOR_TYPE:
JSStringIterator::cast(this)->JSStringIteratorVerify();
break;
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+ JSAsyncFromSyncIterator::cast(this)->JSAsyncFromSyncIteratorVerify();
+ break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapVerify();
break;
@@ -269,10 +275,6 @@ void HeapNumber::HeapNumberVerify() {
CHECK(IsHeapNumber() || IsMutableHeapNumber());
}
-
-void Simd128Value::Simd128ValueVerify() { CHECK(IsSimd128Value()); }
-
-
void ByteArray::ByteArrayVerify() {
CHECK(IsByteArray());
}
@@ -552,6 +554,7 @@ void JSMessageObject::JSMessageObjectVerify() {
void String::StringVerify() {
CHECK(IsString());
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
+ CHECK_IMPLIES(length() == 0, this == GetHeap()->empty_string());
if (IsInternalizedString()) {
CHECK(!GetHeap()->InNewSpace(this));
}
@@ -559,6 +562,8 @@ void String::StringVerify() {
ConsString::cast(this)->ConsStringVerify();
} else if (IsSlicedString()) {
SlicedString::cast(this)->SlicedStringVerify();
+ } else if (IsThinString()) {
+ ThinString::cast(this)->ThinStringVerify();
}
}
@@ -570,12 +575,17 @@ void ConsString::ConsStringVerify() {
CHECK(this->length() >= ConsString::kMinLength);
CHECK(this->length() == this->first()->length() + this->second()->length());
if (this->IsFlat()) {
- // A flat cons can only be created by String::SlowTryFlatten.
- // Afterwards, the first part may be externalized.
- CHECK(this->first()->IsSeqString() || this->first()->IsExternalString());
+ // A flat cons can only be created by String::SlowFlatten.
+ // Afterwards, the first part may be externalized or internalized.
+ CHECK(this->first()->IsSeqString() || this->first()->IsExternalString() ||
+ this->first()->IsThinString());
}
}
+void ThinString::ThinStringVerify() {
+ CHECK(this->actual()->IsInternalizedString());
+ CHECK(this->actual()->IsSeqString() || this->actual()->IsExternalString());
+}
void SlicedString::SlicedStringVerify() {
CHECK(!this->parent()->IsConsString());
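ThinStringVerify above pins down what a ThinString is: a forwarding wrapper
with no character payload of its own, pointing at the canonical internalized
(sequential or external) copy. Conceptually, with hypothetical types rather
than V8's object layout:

    #include <cassert>
    #include <string>

    struct SeqString {
      std::string chars;
      bool internalized = true;
    };

    // A thin string only forwards; reading it means chasing one pointer.
    struct ThinStringSketch {
      const SeqString* actual;
    };

    const std::string& Chars(const ThinStringSketch& s) {
      assert(s.actual != nullptr && s.actual->internalized);
      return s.actual->chars;
    }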
@@ -883,6 +893,12 @@ void JSStringIterator::JSStringIteratorVerify() {
CHECK_LE(index(), String::kMaxLength);
}
+void JSAsyncFromSyncIterator::JSAsyncFromSyncIteratorVerify() {
+ CHECK(IsJSAsyncFromSyncIterator());
+ JSObjectVerify();
+ VerifyHeapPointer(sync_iterator());
+}
+
void JSWeakSet::JSWeakSetVerify() {
CHECK(IsJSWeakSet());
JSObjectVerify();
@@ -914,10 +930,11 @@ void JSPromise::JSPromiseVerify() {
deferred_on_reject()->IsCallable() ||
deferred_on_reject()->IsFixedArray());
CHECK(fulfill_reactions()->IsUndefined(isolate) ||
- fulfill_reactions()->IsCallable() ||
+ fulfill_reactions()->IsCallable() || fulfill_reactions()->IsSymbol() ||
fulfill_reactions()->IsFixedArray());
CHECK(reject_reactions()->IsUndefined(isolate) ||
- reject_reactions()->IsCallable() || reject_reactions()->IsFixedArray());
+ reject_reactions()->IsSymbol() || reject_reactions()->IsCallable() ||
+ reject_reactions()->IsFixedArray());
}
void JSRegExp::JSRegExpVerify() {
@@ -1025,18 +1042,12 @@ void Foreign::ForeignVerify() {
}
-void Box::BoxVerify() {
- CHECK(IsBox());
- value()->ObjectVerify();
-}
-
void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoVerify() {
CHECK(IsPromiseResolveThenableJobInfo());
CHECK(thenable()->IsJSReceiver());
CHECK(then()->IsJSReceiver());
CHECK(resolve()->IsJSFunction());
CHECK(reject()->IsJSFunction());
- VerifySmiField(kDebugIdOffset);
CHECK(context()->IsContext());
}
@@ -1044,7 +1055,8 @@ void PromiseReactionJobInfo::PromiseReactionJobInfoVerify() {
Isolate* isolate = GetIsolate();
CHECK(IsPromiseReactionJobInfo());
CHECK(value()->IsObject());
- CHECK(tasks()->IsFixedArray() || tasks()->IsCallable());
+ CHECK(tasks()->IsFixedArray() || tasks()->IsCallable() ||
+ tasks()->IsSymbol());
CHECK(deferred_promise()->IsUndefined(isolate) ||
deferred_promise()->IsJSReceiver() ||
deferred_promise()->IsFixedArray());
@@ -1054,7 +1066,6 @@ void PromiseReactionJobInfo::PromiseReactionJobInfoVerify() {
CHECK(deferred_on_reject()->IsUndefined(isolate) ||
deferred_on_reject()->IsCallable() ||
deferred_on_reject()->IsFixedArray());
- VerifySmiField(kDebugIdOffset);
CHECK(context()->IsContext());
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 1ca8fca2fe..3aa26c5601 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -31,7 +31,9 @@
#include "src/lookup-cache-inl.h"
#include "src/lookup.h"
#include "src/objects.h"
+#include "src/objects/literal-objects.h"
#include "src/objects/module-info.h"
+#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
#include "src/property.h"
#include "src/prototype.h"
@@ -41,27 +43,6 @@
namespace v8 {
namespace internal {
-template <typename Derived, typename Shape, typename Key>
-uint32_t HashTable<Derived, Shape, Key>::Hash(Key key) {
- if (Shape::UsesSeed) {
- return Shape::SeededHash(key, GetHeap()->HashSeed());
- } else {
- return Shape::Hash(key);
- }
-}
-
-
-template <typename Derived, typename Shape, typename Key>
-uint32_t HashTable<Derived, Shape, Key>::HashForObject(Key key,
- Object* object) {
- if (Shape::UsesSeed) {
- return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
- } else {
- return Shape::HashForObject(key, object);
- }
-}
-
-
PropertyDetails::PropertyDetails(Smi* smi) {
value_ = smi->value();
}
@@ -183,6 +164,7 @@ TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
+TYPE_CHECKER(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
@@ -193,7 +175,6 @@ TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
-TYPE_CHECKER(Simd128Value, SIMD128_VALUE_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
@@ -216,16 +197,13 @@ bool HeapObject::IsFixedArray() const {
instance_type == TRANSITION_ARRAY_TYPE;
}
+bool HeapObject::IsBoilerplateDescription() const { return IsFixedArray(); }
+
// External objects are not extensible, so the map check is enough.
bool HeapObject::IsExternal() const {
return map() == GetHeap()->external_map();
}
-#define SIMD128_TYPE_CHECKER(TYPE, Type, type, lane_count, lane_type) \
- bool HeapObject::Is##Type() const { return map() == GetHeap()->type##_map(); }
-SIMD128_TYPES(SIMD128_TYPE_CHECKER)
-#undef SIMD128_TYPE_CHECKER
-
#define IS_TYPE_FUNCTION_DEF(type_) \
bool Object::Is##type_() const { \
return IsHeapObject() && HeapObject::cast(this)->Is##type_(); \
@@ -296,6 +274,11 @@ bool HeapObject::IsConsString() const {
return StringShape(String::cast(this)).IsCons();
}
+bool HeapObject::IsThinString() const {
+ if (!IsString()) return false;
+ return StringShape(String::cast(this)).IsThin();
+}
+
bool HeapObject::IsSlicedString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSliced();
@@ -390,8 +373,6 @@ bool HeapObject::IsFeedbackVector() const {
bool HeapObject::IsFeedbackMetadata() const { return IsFixedArray(); }
-bool HeapObject::IsLiteralsArray() const { return IsFixedArray(); }
-
bool HeapObject::IsDeoptimizationInputData() const {
// Must be a fixed array.
if (!IsFixedArray()) return false;
@@ -630,9 +611,7 @@ bool Object::IsMinusZero() const {
CAST_ACCESSOR(AbstractCode)
CAST_ACCESSOR(ArrayList)
-CAST_ACCESSOR(Bool16x8)
-CAST_ACCESSOR(Bool32x4)
-CAST_ACCESSOR(Bool8x16)
+CAST_ACCESSOR(BoilerplateDescription)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(BytecodeArray)
CAST_ACCESSOR(Cell)
@@ -651,15 +630,11 @@ CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
-CAST_ACCESSOR(Float32x4)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(FrameArray)
CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(Int16x8)
-CAST_ACCESSOR(Int32x4)
-CAST_ACCESSOR(Int8x16)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
@@ -682,6 +657,7 @@ CAST_ACCESSOR(JSPromiseCapability)
CAST_ACCESSOR(JSPromise)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSAsyncFromSyncIterator)
CAST_ACCESSOR(JSStringIterator)
CAST_ACCESSOR(JSArrayIterator)
CAST_ACCESSOR(JSTypedArray)
@@ -710,7 +686,6 @@ CAST_ACCESSOR(SeqOneByteString)
CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(SharedFunctionInfo)
-CAST_ACCESSOR(Simd128Value)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(String)
@@ -719,9 +694,7 @@ CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(Symbol)
CAST_ACCESSOR(TemplateInfo)
-CAST_ACCESSOR(Uint16x8)
-CAST_ACCESSOR(Uint32x4)
-CAST_ACCESSOR(Uint8x16)
+CAST_ACCESSOR(ThinString)
CAST_ACCESSOR(UnseededNumberDictionary)
CAST_ACCESSOR(WeakCell)
CAST_ACCESSOR(WeakFixedArray)
@@ -768,15 +741,16 @@ bool Object::FilterKey(PropertyFilter filter) {
Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
Representation representation) {
if (!representation.IsDouble()) return object;
- double value;
+ Handle<HeapNumber> result = isolate->factory()->NewHeapNumber(MUTABLE);
if (object->IsUninitialized(isolate)) {
- value = bit_cast<double>(kHoleNanInt64);
+ result->set_value_as_bits(kHoleNanInt64);
} else if (object->IsMutableHeapNumber()) {
- value = HeapNumber::cast(*object)->value();
+ // Ensure that all bits of the double value are preserved.
+ result->set_value_as_bits(HeapNumber::cast(*object)->value_as_bits());
} else {
- value = object->Number();
+ result->set_value(object->Number());
}
- return isolate->factory()->NewHeapNumber(value, MUTABLE);
+ return result;
}
Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
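NewStorageFor now copies doubles with set_value_as_bits because going
through a double value can quiet a signaling NaN or canonicalize the hole
NaN that marks uninitialized fields. A demonstration of the bit-preserving
copy; the payload below is a stand-in, not V8's actual kHoleNanInt64:

    #include <cstdint>
    #include <cstring>

    // Assumed NaN payload standing in for kHoleNanInt64.
    constexpr uint64_t kHoleNanBits = 0xFFF7FFFFFFF7FFFFull;

    int main() {
      double d;
      std::memcpy(&d, &kHoleNanBits, sizeof d);  // bit-level store
      uint64_t back;
      std::memcpy(&back, &d, sizeof back);       // bit-level load
      // A plain copy round-trips the payload; FP arithmetic on d, or a
      // trip through some FPUs, need not. Hence value_as_bits above.
      return back == kHoleNanBits ? 0 : 1;
    }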
@@ -862,6 +836,10 @@ bool StringShape::IsCons() {
return (type_ & kStringRepresentationMask) == kConsStringTag;
}
+bool StringShape::IsThin() {
+ return (type_ & kStringRepresentationMask) == kThinStringTag;
+}
+
bool StringShape::IsSliced() {
return (type_ & kStringRepresentationMask) == kSlicedStringTag;
}
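All StringShape representation predicates test the same low bits of the
instance type against one tag under kStringRepresentationMask, so thin
strings slot in as just one more tag value. The dispatch pattern with
made-up constants (V8's real values live in objects.h):

    #include <cstdint>

    constexpr uint32_t kRepMask = 0x7;  // hypothetical mask and tags
    constexpr uint32_t kConsTag = 0x1, kThinTag = 0x5;

    constexpr bool IsThin(uint32_t type) {
      return (type & kRepMask) == kThinTag;
    }
    static_assert(IsThin(0x25), "only the low bits pick the representation");
    static_assert(!IsThin(0x21), "cons-tagged types are not thin");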
@@ -1492,10 +1470,13 @@ Map* HeapObject::map() const {
void HeapObject::set_map(Map* value) {
set_map_word(MapWord::FromMap(value));
- if (value != NULL) {
+ if (value != nullptr) {
    // TODO(1600) We are passing nullptr as a slot because maps can never be
    // on an evacuation candidate.
- value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
+ value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
+#ifdef VERIFY_HEAP
+ value->GetHeap()->VerifyObjectLayoutChange(this, value);
+#endif
}
}
@@ -1507,10 +1488,13 @@ Map* HeapObject::synchronized_map() {
void HeapObject::synchronized_set_map(Map* value) {
synchronized_set_map_word(MapWord::FromMap(value));
- if (value != NULL) {
+ if (value != nullptr) {
    // TODO(1600) We are passing nullptr as a slot because maps can never be
    // on an evacuation candidate.
- value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
+ value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
+#ifdef VERIFY_HEAP
+ value->GetHeap()->VerifyObjectLayoutChange(this, value);
+#endif
}
}
@@ -1564,6 +1548,13 @@ void HeapNumber::set_value(double value) {
WRITE_DOUBLE_FIELD(this, kValueOffset, value);
}
+uint64_t HeapNumber::value_as_bits() const {
+ return READ_UINT64_FIELD(this, kValueOffset);
+}
+
+void HeapNumber::set_value_as_bits(uint64_t bits) {
+ WRITE_UINT64_FIELD(this, kValueOffset, bits);
+}
int HeapNumber::get_exponent() {
return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
@@ -1575,110 +1566,6 @@ int HeapNumber::get_sign() {
return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}
-
-bool Simd128Value::Equals(Simd128Value* that) {
- // TODO(bmeurer): This doesn't match the SIMD.js specification, but it seems
- // to be consistent with what the CompareICStub does, and what is tested in
- // the current SIMD.js testsuite.
- if (this == that) return true;
-#define SIMD128_VALUE(TYPE, Type, type, lane_count, lane_type) \
- if (this->Is##Type()) { \
- if (!that->Is##Type()) return false; \
- return Type::cast(this)->Equals(Type::cast(that)); \
- }
- SIMD128_TYPES(SIMD128_VALUE)
-#undef SIMD128_VALUE
- return false;
-}
-
-
-// static
-bool Simd128Value::Equals(Handle<Simd128Value> one, Handle<Simd128Value> two) {
- return one->Equals(*two);
-}
-
-
-#define SIMD128_VALUE_EQUALS(TYPE, Type, type, lane_count, lane_type) \
- bool Type::Equals(Type* that) { \
- for (int lane = 0; lane < lane_count; ++lane) { \
- if (this->get_lane(lane) != that->get_lane(lane)) return false; \
- } \
- return true; \
- }
-SIMD128_TYPES(SIMD128_VALUE_EQUALS)
-#undef SIMD128_VALUE_EQUALS
-
-
-#if defined(V8_TARGET_LITTLE_ENDIAN)
-#define SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
- lane_type value = \
- READ_##field_type##_FIELD(this, kValueOffset + lane * field_size);
-#elif defined(V8_TARGET_BIG_ENDIAN)
-#define SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
- lane_type value = READ_##field_type##_FIELD( \
- this, kValueOffset + (lane_count - lane - 1) * field_size);
-#else
-#error Unknown byte ordering
-#endif
-
-#if defined(V8_TARGET_LITTLE_ENDIAN)
-#define SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
- WRITE_##field_type##_FIELD(this, kValueOffset + lane * field_size, value);
-#elif defined(V8_TARGET_BIG_ENDIAN)
-#define SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
- WRITE_##field_type##_FIELD( \
- this, kValueOffset + (lane_count - lane - 1) * field_size, value);
-#else
-#error Unknown byte ordering
-#endif
-
-#define SIMD128_NUMERIC_LANE_FNS(type, lane_type, lane_count, field_type, \
- field_size) \
- lane_type type::get_lane(int lane) const { \
- DCHECK(lane < lane_count && lane >= 0); \
- SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
- return value; \
- } \
- \
- void type::set_lane(int lane, lane_type value) { \
- DCHECK(lane < lane_count && lane >= 0); \
- SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
- }
-
-SIMD128_NUMERIC_LANE_FNS(Float32x4, float, 4, FLOAT, kFloatSize)
-SIMD128_NUMERIC_LANE_FNS(Int32x4, int32_t, 4, INT32, kInt32Size)
-SIMD128_NUMERIC_LANE_FNS(Uint32x4, uint32_t, 4, UINT32, kInt32Size)
-SIMD128_NUMERIC_LANE_FNS(Int16x8, int16_t, 8, INT16, kShortSize)
-SIMD128_NUMERIC_LANE_FNS(Uint16x8, uint16_t, 8, UINT16, kShortSize)
-SIMD128_NUMERIC_LANE_FNS(Int8x16, int8_t, 16, INT8, kCharSize)
-SIMD128_NUMERIC_LANE_FNS(Uint8x16, uint8_t, 16, UINT8, kCharSize)
-#undef SIMD128_NUMERIC_LANE_FNS
-
-
-#define SIMD128_BOOLEAN_LANE_FNS(type, lane_type, lane_count, field_type, \
- field_size) \
- bool type::get_lane(int lane) const { \
- DCHECK(lane < lane_count && lane >= 0); \
- SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
- DCHECK(value == 0 || value == -1); \
- return value != 0; \
- } \
- \
- void type::set_lane(int lane, bool value) { \
- DCHECK(lane < lane_count && lane >= 0); \
- int32_t int_val = value ? -1 : 0; \
- SIMD128_WRITE_LANE(lane_count, field_type, field_size, int_val) \
- }
-
-SIMD128_BOOLEAN_LANE_FNS(Bool32x4, int32_t, 4, INT32, kInt32Size)
-SIMD128_BOOLEAN_LANE_FNS(Bool16x8, int16_t, 8, INT16, kShortSize)
-SIMD128_BOOLEAN_LANE_FNS(Bool8x16, int8_t, 16, INT8, kCharSize)
-#undef SIMD128_BOOLEAN_LANE_FNS
-
-#undef SIMD128_READ_LANE
-#undef SIMD128_WRITE_LANE
-
-
ACCESSORS(JSReceiver, properties, FixedArray, kPropertiesOffset)
@@ -1782,6 +1669,11 @@ AllocationSiteMode AllocationSite::GetMode(
}
inline bool AllocationSite::CanTrack(InstanceType type) {
+ if (FLAG_turbo) {
+ // TurboFan doesn't care at all about String pretenuring feedback,
+ // so don't bother even trying to track that.
+ return type == JS_ARRAY_TYPE || type == JS_OBJECT_TYPE;
+ }
if (FLAG_allocation_site_pretenuring) {
return type == JS_ARRAY_TYPE ||
type == JS_OBJECT_TYPE ||
@@ -2132,7 +2024,7 @@ void WeakCell::initialize(HeapObject* val) {
// We just have to execute the generational barrier here because we never
// mark through a weak cell and collect evacuation candidates when we process
// all weak cells.
- WriteBarrierMode mode = Marking::IsBlack(ObjectMarking::MarkBitFrom(this))
+ WriteBarrierMode mode = ObjectMarking::IsBlack(this)
? UPDATE_WRITE_BARRIER
: UPDATE_WEAK_WRITE_BARRIER;
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
@@ -2315,6 +2207,10 @@ double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
return READ_DOUBLE_FIELD(this, index.offset());
}
+uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) {
+ DCHECK(IsUnboxedDoubleField(index));
+ return READ_UINT64_FIELD(this, index.offset());
+}
void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
if (index.is_inobject()) {
@@ -2326,16 +2222,17 @@ void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
}
}
-
-void JSObject::RawFastDoublePropertyAtPut(FieldIndex index, double value) {
- WRITE_DOUBLE_FIELD(this, index.offset(), value);
+void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
+ uint64_t bits) {
+ WRITE_UINT64_FIELD(this, index.offset(), bits);
}
-
void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
if (IsUnboxedDoubleField(index)) {
DCHECK(value->IsMutableHeapNumber());
- RawFastDoublePropertyAtPut(index, HeapNumber::cast(value)->value());
+ // Ensure that all bits of the double value are preserved.
+ RawFastDoublePropertyAsBitsAtPut(index,
+ HeapNumber::cast(value)->value_as_bits());
} else {
RawFastPropertyAtPut(index, value);
}
@@ -2352,24 +2249,29 @@ void JSObject::WriteToField(int descriptor, PropertyDetails details,
if (value->IsUninitialized(this->GetIsolate())) {
return;
}
+ // Manipulating the signaling NaN used for the hole and uninitialized
+ // double field sentinel in C++, e.g. with bit_cast or value()/set_value(),
+ // will change its value on ia32 (the x87 stack is used to return values
+ // and stores to the stack silently clear the signaling bit).
+ uint64_t bits;
+ if (value->IsSmi()) {
+ bits = bit_cast<uint64_t>(static_cast<double>(Smi::cast(value)->value()));
+ } else {
+ DCHECK(value->IsHeapNumber());
+ bits = HeapNumber::cast(value)->value_as_bits();
+ }
if (IsUnboxedDoubleField(index)) {
- RawFastDoublePropertyAtPut(index, value->Number());
+ RawFastDoublePropertyAsBitsAtPut(index, bits);
} else {
HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
DCHECK(box->IsMutableHeapNumber());
- box->set_value(value->Number());
+ box->set_value_as_bits(bits);
}
} else {
RawFastPropertyAtPut(index, value);
}
}
-void JSObject::WriteToField(int descriptor, Object* value) {
- DescriptorArray* desc = map()->instance_descriptors();
- PropertyDetails details = desc->GetDetails(descriptor);
- WriteToField(descriptor, details, value);
-}
-
int JSObject::GetInObjectPropertyOffset(int index) {
return map()->GetInObjectPropertyOffset(index);
}
@@ -2448,8 +2350,8 @@ void Object::VerifyApiCallResultType() {
DCHECK(IsHeapObject());
Isolate* isolate = HeapObject::cast(this)->GetIsolate();
if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
- IsSimd128Value() || IsUndefined(isolate) || IsTrue(isolate) ||
- IsFalse(isolate) || IsNull(isolate))) {
+ IsUndefined(isolate) || IsTrue(isolate) || IsFalse(isolate) ||
+ IsNull(isolate))) {
FATAL("API call returned invalid object");
}
#endif // DEBUG
@@ -2712,7 +2614,6 @@ AllocationAlignment HeapObject::RequiredAlignment() {
return kDoubleAligned;
}
if (IsHeapNumber()) return kDoubleUnaligned;
- if (IsSimd128Value()) return kSimd128Unaligned;
#endif // V8_HOST_ARCH_32_BIT
return kWordAligned;
}
@@ -2823,7 +2724,7 @@ int DescriptorArray::number_of_descriptors() {
int DescriptorArray::number_of_descriptors_storage() {
int len = length();
- return len == 0 ? 0 : (len - kFirstIndex) / kDescriptorSize;
+ return len == 0 ? 0 : (len - kFirstIndex) / kEntrySize;
}
@@ -3124,7 +3025,6 @@ PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
return PropertyDetails(Smi::cast(details));
}
-
int DescriptorArray::GetFieldIndex(int descriptor_number) {
DCHECK(GetDetails(descriptor_number).location() == kField);
return GetDetails(descriptor_number).field_index();
@@ -3450,66 +3350,6 @@ void DeoptimizationOutputData::SetPcAndState(int index, Smi* offset) {
set(1 + index * 2, offset);
}
-
-Object* LiteralsArray::get(int index) const { return FixedArray::get(index); }
-
-
-void LiteralsArray::set(int index, Object* value) {
- FixedArray::set(index, value);
-}
-
-
-void LiteralsArray::set(int index, Smi* value) {
- FixedArray::set(index, value);
-}
-
-
-void LiteralsArray::set(int index, Object* value, WriteBarrierMode mode) {
- FixedArray::set(index, value, mode);
-}
-
-
-LiteralsArray* LiteralsArray::cast(Object* object) {
- SLOW_DCHECK(object->IsLiteralsArray());
- return reinterpret_cast<LiteralsArray*>(object);
-}
-
-
-FeedbackVector* LiteralsArray::feedback_vector() const {
- if (length() == 0) {
- return FeedbackVector::cast(
- const_cast<FixedArray*>(FixedArray::cast(this)));
- }
- return FeedbackVector::cast(get(kVectorIndex));
-}
-
-
-void LiteralsArray::set_feedback_vector(FeedbackVector* vector) {
- if (length() <= kVectorIndex) {
- DCHECK(vector->length() == 0);
- return;
- }
- set(kVectorIndex, vector);
-}
-
-
-Object* LiteralsArray::literal(int literal_index) const {
- return get(kFirstLiteralIndex + literal_index);
-}
-
-
-void LiteralsArray::set_literal(int literal_index, Object* literal) {
- set(kFirstLiteralIndex + literal_index, literal);
-}
-
-void LiteralsArray::set_literal_undefined(int literal_index) {
- set_undefined(kFirstLiteralIndex + literal_index);
-}
-
-int LiteralsArray::literals_count() const {
- return length() - kFirstLiteralIndex;
-}
-
int HandlerTable::GetRangeStart(int index) const {
return Smi::cast(get(index * kRangeEntrySize + kRangeStartIndex))->value();
}
@@ -3679,10 +3519,19 @@ bool String::Equals(Handle<String> one, Handle<String> two) {
Handle<String> String::Flatten(Handle<String> string, PretenureFlag pretenure) {
- if (!string->IsConsString()) return string;
- Handle<ConsString> cons = Handle<ConsString>::cast(string);
- if (cons->IsFlat()) return handle(cons->first());
- return SlowFlatten(cons, pretenure);
+ if (string->IsConsString()) {
+ Handle<ConsString> cons = Handle<ConsString>::cast(string);
+ if (cons->IsFlat()) {
+ string = handle(cons->first());
+ } else {
+ return SlowFlatten(cons, pretenure);
+ }
+ }
+ if (string->IsThinString()) {
+ string = handle(Handle<ThinString>::cast(string)->actual());
+ DCHECK(!string->IsConsString());
+ }
+ return string;
}
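
Flatten now looks through two indirections: a flat cons string (empty second part, content in the first part) and the new ThinString, a transparent forwarding pointer whose target is never itself a cons. A standalone miniature of that lookup order, with hypothetical types in place of the V8 string classes:

    #include <cassert>
    #include <string>

    struct Str { enum Kind { kFlat, kCons, kThin } kind; };
    struct FlatStr : Str { std::string data; };
    struct ConsStr : Str { Str* first; Str* second; bool is_flat; };
    struct ThinStr : Str { Str* actual; };

    Str* FlattenSketch(Str* s) {
      if (s->kind == Str::kCons) {
        ConsStr* cons = static_cast<ConsStr*>(s);
        // A non-flat cons needs the slow path to build a flat copy; stubbed
        // out here since SlowFlatten is beyond this sketch.
        if (!cons->is_flat) return nullptr;  /* SlowFlatten(cons) */
        s = cons->first;
      }
      if (s->kind == Str::kThin) {
        s = static_cast<ThinStr*>(s)->actual;  // one hop, never a cons
        assert(s->kind != Str::kCons);
      }
      return s;
    }
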
@@ -3703,6 +3552,9 @@ uint16_t String::Get(int index) {
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag:
return SlicedString::cast(this)->SlicedStringGet(index);
+ case kThinStringTag | kOneByteStringTag:
+ case kThinStringTag | kTwoByteStringTag:
+ return ThinString::cast(this)->ThinStringGet(index);
default:
break;
}
@@ -3734,6 +3586,7 @@ String* String::GetUnderlying() {
DCHECK(this->IsFlat());
DCHECK(StringShape(this).IsIndirect());
STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
+ STATIC_ASSERT(ConsString::kFirstOffset == ThinString::kActualOffset);
const int kUnderlyingOffset = SlicedString::kParentOffset;
return String::cast(READ_FIELD(this, kUnderlyingOffset));
}
@@ -3785,6 +3638,11 @@ ConsString* String::VisitFlat(Visitor* visitor,
case kConsStringTag | kTwoByteStringTag:
return ConsString::cast(string);
+ case kThinStringTag | kOneByteStringTag:
+ case kThinStringTag | kTwoByteStringTag:
+ string = ThinString::cast(string)->actual();
+ continue;
+
default:
UNREACHABLE();
return NULL;
@@ -3916,6 +3774,7 @@ void ConsString::set_second(String* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
}
+ACCESSORS(ThinString, actual, String, kActualOffset);
bool ExternalString::is_short() {
InstanceType type = map()->instance_type();
@@ -5013,7 +4872,8 @@ bool Code::IsCodeStubOrIC() {
}
ExtraICState Code::extra_ic_state() {
- DCHECK(is_inline_cache_stub() || is_debug_stub());
+ DCHECK(is_binary_op_stub() || is_compare_ic_stub() ||
+ is_to_boolean_ic_stub() || is_debug_stub());
return ExtractExtraICStateFromFlags(flags());
}
@@ -5310,7 +5170,7 @@ bool Code::is_debug_stub() {
return false;
}
bool Code::is_handler() { return kind() == HANDLER; }
-bool Code::is_call_stub() { return kind() == CALL_IC; }
+bool Code::is_stub() { return kind() == STUB; }
bool Code::is_binary_op_stub() { return kind() == BINARY_OP_IC; }
bool Code::is_compare_ic_stub() { return kind() == COMPARE_IC; }
bool Code::is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
@@ -5392,42 +5252,20 @@ bool Code::IsWeakObject(Object* object) {
bool Code::IsWeakObjectInOptimizedCode(Object* object) {
if (object->IsMap()) {
- return Map::cast(object)->CanTransition() &&
- FLAG_weak_embedded_maps_in_optimized_code;
+ return Map::cast(object)->CanTransition();
}
if (object->IsCell()) {
object = Cell::cast(object)->value();
} else if (object->IsPropertyCell()) {
object = PropertyCell::cast(object)->value();
}
- if (object->IsJSReceiver()) {
- return FLAG_weak_embedded_objects_in_optimized_code;
- }
- if (object->IsContext()) {
- // Contexts of inlined functions are embedded in optimized code.
- return FLAG_weak_embedded_objects_in_optimized_code;
+ if (object->IsJSReceiver() || object->IsContext()) {
+ return true;
}
return false;
}
-class Code::FindAndReplacePattern {
- public:
- FindAndReplacePattern() : count_(0) { }
- void Add(Handle<Map> map_to_find, Handle<Object> obj_to_replace) {
- DCHECK(count_ < kMaxCount);
- find_[count_] = map_to_find;
- replace_[count_] = obj_to_replace;
- ++count_;
- }
- private:
- static const int kMaxCount = 4;
- int count_;
- Handle<Map> find_[kMaxCount];
- Handle<Object> replace_[kMaxCount];
- friend class Code;
-};
-
int AbstractCode::instruction_size() {
if (IsCode()) {
return GetCode()->instruction_size();
@@ -5686,7 +5524,7 @@ ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, literals, LiteralsArray, kLiteralsOffset)
+ACCESSORS(JSFunction, feedback_vector_cell, Cell, kFeedbackVectorOffset)
ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
@@ -5705,13 +5543,10 @@ ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
ACCESSORS(AccessorInfo, js_getter, Object, kJsGetterOffset)
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
-ACCESSORS(Box, value, Object, kValueOffset)
-
ACCESSORS(PromiseResolveThenableJobInfo, thenable, JSReceiver, kThenableOffset)
ACCESSORS(PromiseResolveThenableJobInfo, then, JSReceiver, kThenOffset)
ACCESSORS(PromiseResolveThenableJobInfo, resolve, JSFunction, kResolveOffset)
ACCESSORS(PromiseResolveThenableJobInfo, reject, JSFunction, kRejectOffset)
-SMI_ACCESSORS(PromiseResolveThenableJobInfo, debug_id, kDebugIdOffset)
ACCESSORS(PromiseResolveThenableJobInfo, context, Context, kContextOffset);
ACCESSORS(PromiseReactionJobInfo, value, Object, kValueOffset);
@@ -5722,7 +5557,6 @@ ACCESSORS(PromiseReactionJobInfo, deferred_on_resolve, Object,
kDeferredOnResolveOffset);
ACCESSORS(PromiseReactionJobInfo, deferred_on_reject, Object,
kDeferredOnRejectOffset);
-SMI_ACCESSORS(PromiseReactionJobInfo, debug_id, kDebugIdOffset);
ACCESSORS(PromiseReactionJobInfo, context, Context, kContextOffset);
Map* PrototypeInfo::ObjectCreateMap() {
@@ -5968,6 +5802,7 @@ void Script::set_origin_options(ScriptOriginOptions origin_options) {
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
+SMI_ACCESSORS(DebugInfo, debugger_hints, kDebuggerHintsIndex)
ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayIndex)
ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
@@ -6037,30 +5872,12 @@ BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_named_expression,
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
kIsTopLevelBit)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
- kAllowLazyCompilation)
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
- uses_arguments,
- kUsesArguments)
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
- has_duplicate_parameters,
- kHasDuplicateParameters)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, deserialized, kDeserialized)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
- kIsDeclaration)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, marked_for_tier_up,
- kMarkedForTierUp)
-
#if V8_HOST_ARCH_32_BIT
SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
SMI_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
kFormalParameterCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
-SMI_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
SMI_ACCESSORS(SharedFunctionInfo, start_position_and_type,
kStartPositionAndTypeOffset)
SMI_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
@@ -6110,7 +5927,6 @@ PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, internal_formal_parameter_count,
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
expected_nof_properties,
kExpectedNofPropertiesOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, end_position, kEndPositionOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
@@ -6138,12 +5954,6 @@ PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
#endif
-
-BOOL_GETTER(SharedFunctionInfo,
- compiler_hints,
- optimization_disabled,
- kOptimizationDisabled)
-
AbstractCode* SharedFunctionInfo::abstract_code() {
if (HasBytecodeArray()) {
return AbstractCode::cast(bytecode_array());
@@ -6152,20 +5962,43 @@ AbstractCode* SharedFunctionInfo::abstract_code() {
}
}
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
+ kAllowLazyCompilation)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, uses_arguments,
+ kUsesArguments)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, has_duplicate_parameters,
+ kHasDuplicateParameters)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
+ kIsDeclaration)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, marked_for_tier_up,
+ kMarkedForTierUp)
+
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
+ kNeedsHomeObject)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline, kForceInline)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, must_use_ignition_turbo,
+ kMustUseIgnitionTurbo)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
+ kIsAsmWasmBroken)
+
+BOOL_GETTER(SharedFunctionInfo, compiler_hints, optimization_disabled,
+ kOptimizationDisabled)
+
void SharedFunctionInfo::set_optimization_disabled(bool disable) {
set_compiler_hints(BooleanBit::set(compiler_hints(),
kOptimizationDisabled,
disable));
}
-
LanguageMode SharedFunctionInfo::language_mode() {
STATIC_ASSERT(LANGUAGE_END == 2);
return construct_language_mode(
BooleanBit::get(compiler_hints(), kStrictModeFunction));
}
-
void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
STATIC_ASSERT(LANGUAGE_END == 2);
// We only allow language mode transitions that set the same language mode
@@ -6180,7 +6013,6 @@ FunctionKind SharedFunctionInfo::kind() const {
return FunctionKindBits::decode(compiler_hints());
}
-
void SharedFunctionInfo::set_kind(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
int hints = compiler_hints();
@@ -6188,24 +6020,19 @@ void SharedFunctionInfo::set_kind(FunctionKind kind) {
set_compiler_hints(hints);
}
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
- kNeedsHomeObject)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline, kForceInline)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
- name_should_print_as_anonymous,
- kNameShouldPrintAsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous_expression,
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints,
+ name_should_print_as_anonymous, kNameShouldPrintAsAnonymous)
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, is_anonymous_expression,
kIsAnonymousExpression)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, must_use_ignition_turbo,
- kMustUseIgnitionTurbo)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
- kIsAsmWasmBroken)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, has_no_side_effect,
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, deserialized, kDeserialized)
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, has_no_side_effect,
kHasNoSideEffect)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, computed_has_no_side_effect,
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, computed_has_no_side_effect,
kComputedHasNoSideEffect)
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, debug_is_blackboxed,
+ kDebugIsBlackboxed)
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, computed_debug_is_blackboxed,
+ kComputedDebugIsBlackboxed)
bool Script::HasValidSource() {
Object* src = this->source();
@@ -6309,25 +6136,35 @@ bool SharedFunctionInfo::has_simple_parameters() {
return scope_info()->HasSimpleParameters();
}
-
-bool SharedFunctionInfo::HasDebugInfo() {
- bool has_debug_info = debug_info()->IsStruct();
+bool SharedFunctionInfo::HasDebugInfo() const {
+ bool has_debug_info = !debug_info()->IsSmi();
+ DCHECK_EQ(debug_info()->IsStruct(), has_debug_info);
DCHECK(!has_debug_info || HasDebugCode());
return has_debug_info;
}
-
-DebugInfo* SharedFunctionInfo::GetDebugInfo() {
+DebugInfo* SharedFunctionInfo::GetDebugInfo() const {
DCHECK(HasDebugInfo());
return DebugInfo::cast(debug_info());
}
-
-bool SharedFunctionInfo::HasDebugCode() {
+bool SharedFunctionInfo::HasDebugCode() const {
if (HasBaselineCode()) return code()->has_debug_break_slots();
return HasBytecodeArray();
}
+int SharedFunctionInfo::debugger_hints() const {
+ if (HasDebugInfo()) return GetDebugInfo()->debugger_hints();
+ return Smi::cast(debug_info())->value();
+}
+
+void SharedFunctionInfo::set_debugger_hints(int value) {
+ if (HasDebugInfo()) {
+ GetDebugInfo()->set_debugger_hints(value);
+ } else {
+ set_debug_info(Smi::FromInt(value));
+ }
+}
bool SharedFunctionInfo::IsApiFunction() {
return function_data()->IsFunctionTemplateInfo();
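
The debugger_hints accessors above pack the hint bits straight into the debug_info slot as a Smi until a full DebugInfo struct exists, so the common no-debugger case allocates nothing. The one-word pointer-or-small-integer trick in a standalone miniature (the tag layout here is illustrative, not V8's actual Smi encoding):

    #include <cassert>
    #include <cstdint>

    struct DebugInfo { int hints = 0; };

    struct Slot {
      uintptr_t raw = 1;  // low bit set => inline integer, initial value 0

      bool has_debug_info() const { return (raw & 1) == 0; }
      int hints() const {
        return has_debug_info() ? reinterpret_cast<DebugInfo*>(raw)->hints
                                : static_cast<int>(raw >> 1);
      }
      void set_hints(int value) {
        if (has_debug_info()) {
          reinterpret_cast<DebugInfo*>(raw)->hints = value;
        } else {
          raw = (static_cast<uintptr_t>(value) << 1) | 1;
        }
      }
    };

    int main() {
      Slot slot;
      slot.set_hints(5);
      assert(!slot.has_debug_info() && slot.hints() == 5);
    }
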
@@ -6344,11 +6181,11 @@ void SharedFunctionInfo::set_api_func_data(FunctionTemplateInfo* data) {
set_function_data(data);
}
-bool SharedFunctionInfo::HasBytecodeArray() {
+bool SharedFunctionInfo::HasBytecodeArray() const {
return function_data()->IsBytecodeArray();
}
-BytecodeArray* SharedFunctionInfo::bytecode_array() {
+BytecodeArray* SharedFunctionInfo::bytecode_array() const {
DCHECK(HasBytecodeArray());
return BytecodeArray::cast(function_data());
}
@@ -6363,11 +6200,11 @@ void SharedFunctionInfo::ClearBytecodeArray() {
set_function_data(GetHeap()->undefined_value());
}
-bool SharedFunctionInfo::HasAsmWasmData() {
+bool SharedFunctionInfo::HasAsmWasmData() const {
return function_data()->IsFixedArray();
}
-FixedArray* SharedFunctionInfo::asm_wasm_data() {
+FixedArray* SharedFunctionInfo::asm_wasm_data() const {
DCHECK(HasAsmWasmData());
return FixedArray::cast(function_data());
}
@@ -6508,6 +6345,10 @@ bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
return optimized_code_map() == GetHeap()->empty_fixed_array();
}
+FeedbackVector* JSFunction::feedback_vector() const {
+ DCHECK(feedback_vector_cell()->value()->IsFeedbackVector());
+ return FeedbackVector::cast(feedback_vector_cell()->value());
+}
bool JSFunction::IsOptimized() {
return code()->kind() == Code::OPTIMIZED_FUNCTION;
@@ -6615,11 +6456,29 @@ void JSFunction::ReplaceCode(Code* code) {
}
}
+bool JSFunction::has_feedback_vector() const {
+ return !feedback_vector_cell()->value()->IsUndefined(GetIsolate());
+}
+
+JSFunction::FeedbackVectorState JSFunction::GetFeedbackVectorState(
+ Isolate* isolate) const {
+ Cell* cell = feedback_vector_cell();
+ if (cell == isolate->heap()->undefined_cell()) {
+ return TOP_LEVEL_SCRIPT_NEEDS_VECTOR;
+ } else if (cell->value() == isolate->heap()->undefined_value() ||
+ !has_feedback_vector()) {
+ return NEEDS_VECTOR;
+ }
+ return HAS_VECTOR;
+}
Context* JSFunction::context() {
return Context::cast(READ_FIELD(this, kContextOffset));
}
+bool JSFunction::has_context() const {
+ return READ_FIELD(this, kContextOffset)->IsContext();
+}
JSObject* JSFunction::global_proxy() {
return context()->global_proxy();
@@ -6691,11 +6550,6 @@ bool JSFunction::is_compiled() {
code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent);
}
-FeedbackVector* JSFunction::feedback_vector() {
- LiteralsArray* array = literals();
- return array->feedback_vector();
-}
-
ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
ACCESSORS(JSProxy, hash, Object, kHashOffset)
@@ -6804,7 +6658,6 @@ CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
CODE_ACCESSORS(handler_table, FixedArray, kHandlerTableOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
CODE_ACCESSORS(source_position_table, ByteArray, kSourcePositionTableOffset)
-CODE_ACCESSORS(protected_instructions, FixedArray, kProtectedInstructionOffset)
CODE_ACCESSORS(raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
CODE_ACCESSORS(next_code_link, Object, kNextCodeLinkOffset)
#undef CODE_ACCESSORS
@@ -6820,7 +6673,6 @@ void Code::WipeOutHeader() {
}
WRITE_FIELD(this, kNextCodeLinkOffset, NULL);
WRITE_FIELD(this, kGCMetadataOffset, NULL);
- WRITE_FIELD(this, kProtectedInstructionOffset, NULL);
}
@@ -6904,7 +6756,6 @@ int Code::SizeIncludingMetadata() {
size += deoptimization_data()->Size();
size += handler_table()->Size();
if (kind() == FUNCTION) size += source_position_table()->Size();
- size += protected_instructions()->Size();
return size;
}
@@ -7076,6 +6927,18 @@ void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
}
+// static
+MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
+ Handle<Object> receiver,
+ const char* method_name) {
+ if (V8_UNLIKELY(!receiver->IsJSTypedArray())) {
+ const MessageTemplate::Template message = MessageTemplate::kNotTypedArray;
+ THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
+ }
+
+ // TODO(caitp): throw if array.[[ViewedArrayBuffer]] is neutered (per v8:4648)
+ return Handle<JSTypedArray>::cast(receiver);
+}
#ifdef VERIFY_HEAP
ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
@@ -8365,6 +8228,9 @@ ACCESSORS(JSArrayIterator, object, Object, kIteratedObjectOffset)
ACCESSORS(JSArrayIterator, index, Object, kNextIndexOffset)
ACCESSORS(JSArrayIterator, object_map, Object, kIteratedObjectMapOffset)
+ACCESSORS(JSAsyncFromSyncIterator, sync_iterator, JSReceiver,
+ kSyncIteratorOffset)
+
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 4f63757b7c..1e1a1062c3 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -74,9 +74,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
HeapNumber::cast(this)->HeapNumberPrint(os);
os << ">\n";
break;
- case SIMD128_VALUE_TYPE:
- Simd128Value::cast(this)->Simd128ValuePrint(os);
- break;
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os);
break;
@@ -253,59 +250,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
}
}
-
-void Simd128Value::Simd128ValuePrint(std::ostream& os) { // NOLINT
-#define PRINT_SIMD128_VALUE(TYPE, Type, type, lane_count, lane_type) \
- if (Is##Type()) return Type::cast(this)->Type##Print(os);
- SIMD128_TYPES(PRINT_SIMD128_VALUE)
-#undef PRINT_SIMD128_VALUE
- UNREACHABLE();
-}
-
-
-void Float32x4::Float32x4Print(std::ostream& os) { // NOLINT
- char arr[100];
- Vector<char> buffer(arr, arraysize(arr));
- os << std::string(DoubleToCString(get_lane(0), buffer)) << ", "
- << std::string(DoubleToCString(get_lane(1), buffer)) << ", "
- << std::string(DoubleToCString(get_lane(2), buffer)) << ", "
- << std::string(DoubleToCString(get_lane(3), buffer));
-}
-
-
-#define SIMD128_INT_PRINT_FUNCTION(type, lane_count) \
- void type::type##Print(std::ostream& os) { \
- char arr[100]; \
- Vector<char> buffer(arr, arraysize(arr)); \
- os << std::string(IntToCString(get_lane(0), buffer)); \
- for (int i = 1; i < lane_count; i++) { \
- os << ", " << std::string(IntToCString(get_lane(i), buffer)); \
- } \
- }
-SIMD128_INT_PRINT_FUNCTION(Int32x4, 4)
-SIMD128_INT_PRINT_FUNCTION(Uint32x4, 4)
-SIMD128_INT_PRINT_FUNCTION(Int16x8, 8)
-SIMD128_INT_PRINT_FUNCTION(Uint16x8, 8)
-SIMD128_INT_PRINT_FUNCTION(Int8x16, 16)
-SIMD128_INT_PRINT_FUNCTION(Uint8x16, 16)
-#undef SIMD128_INT_PRINT_FUNCTION
-
-
-#define SIMD128_BOOL_PRINT_FUNCTION(type, lane_count) \
- void type::type##Print(std::ostream& os) { \
- char arr[100]; \
- Vector<char> buffer(arr, arraysize(arr)); \
- os << std::string(get_lane(0) ? "true" : "false"); \
- for (int i = 1; i < lane_count; i++) { \
- os << ", " << std::string(get_lane(i) ? "true" : "false"); \
- } \
- }
-SIMD128_BOOL_PRINT_FUNCTION(Bool32x4, 4)
-SIMD128_BOOL_PRINT_FUNCTION(Bool16x8, 8)
-SIMD128_BOOL_PRINT_FUNCTION(Bool8x16, 16)
-#undef SIMD128_BOOL_PRINT_FUNCTION
-
-
void ByteArray::ByteArrayPrint(std::ostream& os) { // NOLINT
os << "byte array, data starts at " << GetDataStartAddress();
}
@@ -365,21 +309,26 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
namespace {
template <class T>
-double GetScalarElement(T* array, int index) {
- return array->get_scalar(index);
+bool IsTheHoleAt(T* array, int index) {
+ return false;
}
-double GetScalarElement(FixedDoubleArray* array, int index) {
- if (array->is_the_hole(index)) return bit_cast<double>(kHoleNanInt64);
- return array->get_scalar(index);
+template <>
+bool IsTheHoleAt(FixedDoubleArray* array, int index) {
+ return array->is_the_hole(index);
}
-bool is_the_hole(double maybe_hole) {
- return bit_cast<uint64_t>(maybe_hole) == kHoleNanInt64;
+template <class T>
+double GetScalarElement(T* array, int index) {
+ if (IsTheHoleAt(array, index)) {
+ return std::numeric_limits<double>::quiet_NaN();
+ }
+ return array->get_scalar(index);
}
-template <class T, bool print_the_hole>
+template <class T>
void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
+ const bool print_the_hole = std::is_same<T, FixedDoubleArray>::value;
T* array = T::cast(object);
if (array->length() == 0) return;
int previous_index = 0;
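
The printer refactoring above trades the boolean template parameter for a traits-style predicate: the primary IsTheHoleAt template reports "no holes" for every array type, a full specialization opts FixedDoubleArray in, and std::is_same recovers the old print_the_hole flag. The same pattern in miniature, using hypothetical array types:

    #include <iostream>

    struct PlainArray {};
    struct HoleyArray { bool hole_at_0; };

    // Primary template: most array types can never contain holes.
    template <class T>
    bool IsTheHoleAt(const T&, int) { return false; }

    // Full specialization for the one type that can.
    template <>
    bool IsTheHoleAt(const HoleyArray& a, int) { return a.hole_at_0; }

    int main() {
      std::cout << IsTheHoleAt(PlainArray{}, 0) << " "
                << IsTheHoleAt(HoleyArray{true}, 0) << "\n";  // prints: 0 1
    }
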
@@ -390,7 +339,7 @@ void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
if (i < array->length()) value = GetScalarElement(array, i);
bool values_are_nan = std::isnan(previous_value) && std::isnan(value);
if (i != array->length() && (previous_value == value || values_are_nan) &&
- is_the_hole(previous_value) == is_the_hole(value)) {
+ IsTheHoleAt(array, i - 1) == IsTheHoleAt(array, i)) {
continue;
}
os << "\n";
@@ -400,7 +349,7 @@ void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
ss << '-' << (i - 1);
}
os << std::setw(12) << ss.str() << ": ";
- if (print_the_hole && is_the_hole(previous_value)) {
+ if (print_the_hole && IsTheHoleAt(array, i - 1)) {
os << "<the_hole>";
} else {
os << previous_value;
@@ -412,7 +361,7 @@ void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
void PrintFixedArrayElements(std::ostream& os, FixedArray* array) {
// Print in array notation for non-sparse arrays.
- Object* previous_value = array->get(0);
+ Object* previous_value = array->length() > 0 ? array->get(0) : nullptr;
Object* value = nullptr;
int previous_index = 0;
int i;
@@ -450,14 +399,14 @@ bool JSObject::PrintElements(std::ostream& os) { // NOLINT
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- DoPrintElements<FixedDoubleArray, true>(os, elements());
+ DoPrintElements<FixedDoubleArray>(os, elements());
break;
}
-#define PRINT_ELEMENTS(Type, type, TYPE, elementType, size) \
- case TYPE##_ELEMENTS: { \
- DoPrintElements<Fixed##Type##Array, false>(os, elements()); \
- break; \
+#define PRINT_ELEMENTS(Type, type, TYPE, elementType, size) \
+ case TYPE##_ELEMENTS: { \
+ DoPrintElements<Fixed##Type##Array>(os, elements()); \
+ break; \
}
TYPED_ARRAYS(PRINT_ELEMENTS)
#undef PRINT_ELEMENTS
@@ -551,6 +500,7 @@ void JSPromise::JSPromisePrint(std::ostream& os) { // NOLINT
os << "\n - fulfill_reactions = " << Brief(fulfill_reactions());
os << "\n - reject_reactions = " << Brief(reject_reactions());
os << "\n - has_handler = " << has_handler();
+ os << "\n ";
}
void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
@@ -655,7 +605,7 @@ void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FixedDoubleArray");
os << "\n - map = " << Brief(map());
os << "\n - length: " << length();
- DoPrintElements<FixedDoubleArray, true>(os, this);
+ DoPrintElements<FixedDoubleArray>(os, this);
os << "\n";
}
@@ -693,7 +643,7 @@ void FeedbackVectorSpecBase<Derived>::FeedbackVectorSpecPrint(
}
for (int slot = 0; slot < slot_count;) {
- FeedbackVectorSlotKind kind = This()->GetKind(slot);
+ FeedbackSlotKind kind = This()->GetKind(FeedbackSlot(slot));
int entry_size = FeedbackMetadata::GetSlotSize(kind);
DCHECK_LT(0, entry_size);
os << "\n Slot #" << slot << " " << kind;
@@ -718,15 +668,10 @@ void FeedbackMetadata::FeedbackMetadataPrint(std::ostream& os) { // NOLINT
os << "\n - slot_count: " << slot_count();
FeedbackMetadataIterator iter(this);
- int parameter_index = 0;
while (iter.HasNext()) {
- FeedbackVectorSlot slot = iter.Next();
- FeedbackVectorSlotKind kind = iter.kind();
+ FeedbackSlot slot = iter.Next();
+ FeedbackSlotKind kind = iter.kind();
os << "\n Slot " << slot << " " << kind;
- if (FeedbackMetadata::SlotRequiresParameter(kind)) {
- int parameter_value = this->GetParameter(parameter_index++);
- os << " [" << parameter_value << "]";
- }
}
os << "\n";
}
@@ -745,70 +690,70 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
return;
}
- int parameter_index = 0;
FeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
- FeedbackVectorSlot slot = iter.Next();
- FeedbackVectorSlotKind kind = iter.kind();
+ FeedbackSlot slot = iter.Next();
+ FeedbackSlotKind kind = iter.kind();
os << "\n Slot " << slot << " " << kind;
os << " ";
switch (kind) {
- case FeedbackVectorSlotKind::LOAD_IC: {
+ case FeedbackSlotKind::kLoadProperty: {
LoadICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
- case FeedbackVectorSlotKind::LOAD_GLOBAL_IC: {
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
LoadGlobalICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
- case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
+ case FeedbackSlotKind::kLoadKeyed: {
KeyedLoadICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
- case FeedbackVectorSlotKind::CALL_IC: {
+ case FeedbackSlotKind::kCall: {
CallICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
- case FeedbackVectorSlotKind::STORE_IC: {
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed: {
StoreICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
- case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict: {
KeyedStoreICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
- case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC: {
+ case FeedbackSlotKind::kBinaryOp: {
BinaryOpICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
- case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+ case FeedbackSlotKind::kCompareOp: {
CompareICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
- case FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC: {
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
StoreDataPropertyInLiteralICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
- case FeedbackVectorSlotKind::CREATE_CLOSURE: {
- // TODO(mvstanton): Integrate this into the iterator.
- int parameter_value = metadata()->GetParameter(parameter_index++);
- os << "[" << parameter_value << "]";
+ case FeedbackSlotKind::kCreateClosure:
+ case FeedbackSlotKind::kLiteral:
+ case FeedbackSlotKind::kGeneral:
break;
- }
- case FeedbackVectorSlotKind::GENERAL:
- break;
- case FeedbackVectorSlotKind::INVALID:
- case FeedbackVectorSlotKind::KINDS_NUMBER:
+ case FeedbackSlotKind::kToBoolean:
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
break;
}
@@ -847,6 +792,8 @@ void String::StringPrint(std::ostream& os) { // NOLINT
os << "#";
} else if (StringShape(this).IsCons()) {
os << "c\"";
+ } else if (StringShape(this).IsThin()) {
+ os << ">\"";
} else {
os << "\"";
}
@@ -1052,17 +999,47 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "\n - async";
}
os << "\n - context = " << Brief(context());
- os << "\n - literals = " << Brief(literals());
+ os << "\n - feedback vector cell = " << Brief(feedback_vector_cell());
os << "\n - code = " << Brief(code());
JSObjectPrintBody(os, this);
}
+namespace {
+
+std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
+ os << "[";
+ if (kind == FunctionKind::kNormalFunction) {
+ os << " NormalFunction";
+ } else {
+#define PRINT_FLAG(name) \
+ if (static_cast<int>(kind) & static_cast<int>(FunctionKind::k##name)) { \
+ os << " " << #name; \
+ }
+
+ PRINT_FLAG(ArrowFunction)
+ PRINT_FLAG(GeneratorFunction)
+ PRINT_FLAG(ConciseMethod)
+ PRINT_FLAG(DefaultConstructor)
+ PRINT_FLAG(DerivedConstructor)
+ PRINT_FLAG(BaseConstructor)
+ PRINT_FLAG(GetterFunction)
+ PRINT_FLAG(SetterFunction)
+ PRINT_FLAG(AsyncFunction)
+ PRINT_FLAG(Module)
+#undef PRINT_FLAG
+ }
+ return os << " ]";
+}
+
+} // namespace
void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "SharedFunctionInfo");
os << "\n - name = " << Brief(name());
+ os << "\n - kind = " << kind();
os << "\n - formal_parameter_count = " << internal_formal_parameter_count();
os << "\n - expected_nof_properties = " << expected_nof_properties();
+ os << "\n - language_mode = " << language_mode();
os << "\n - ast_node_count = " << ast_node_count();
os << "\n - instance class name = ";
instance_class_name()->Print(os);
@@ -1089,9 +1066,12 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n - function token position = " << function_token_position();
os << "\n - start position = " << start_position();
os << "\n - end position = " << end_position();
- os << "\n - debug info = " << Brief(debug_info());
+ if (HasDebugInfo()) {
+ os << "\n - debug info = " << Brief(debug_info());
+ } else {
+ os << "\n - no debug info";
+ }
os << "\n - length = " << length();
- os << "\n - num_literals = " << num_literals();
os << "\n - optimized_code_map = " << Brief(optimized_code_map());
os << "\n - feedback_metadata = ";
feedback_metadata()->FeedbackMetadataPrint(os);
@@ -1218,12 +1198,6 @@ void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
}
-void Box::BoxPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Box");
- os << "\n - value: " << Brief(value());
- os << "\n";
-}
-
void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoPrint(
std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PromiseResolveThenableJobInfo");
@@ -1231,7 +1205,6 @@ void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoPrint(
os << "\n - then: " << Brief(then());
os << "\n - resolve: " << Brief(resolve());
os << "\n - reject: " << Brief(reject());
- os << "\n - debug id: " << debug_id();
os << "\n - context: " << Brief(context());
os << "\n";
}
@@ -1244,7 +1217,6 @@ void PromiseReactionJobInfo::PromiseReactionJobInfoPrint(
os << "\n - deferred_promise: " << Brief(deferred_promise());
os << "\n - deferred_on_resolve: " << Brief(deferred_on_resolve());
os << "\n - deferred_on_reject: " << Brief(deferred_on_reject());
- os << "\n - debug id: " << debug_id();
os << "\n - reaction context: " << Brief(context());
os << "\n";
}
@@ -1263,10 +1235,19 @@ void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) { // NOLINT
void Module::ModulePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Module");
+ // TODO(neis): Simplify once modules have a script field.
+ if (!evaluated()) {
+ SharedFunctionInfo* shared = code()->IsSharedFunctionInfo()
+ ? SharedFunctionInfo::cast(code())
+ : JSFunction::cast(code())->shared();
+ Object* origin = Script::cast(shared->script())->GetNameOrSourceURL();
+ os << "\n - origin: " << Brief(origin);
+ }
os << "\n - code: " << Brief(code());
os << "\n - exports: " << Brief(exports());
os << "\n - requested_modules: " << Brief(requested_modules());
- os << "\n - evaluated: " << evaluated();
+ os << "\n - instantiated, evaluated: " << instantiated() << ", "
+ << evaluated();
os << "\n";
}
@@ -1682,6 +1663,14 @@ extern void _v8_internal_Print_Code(void* object) {
isolate->FindCodeObject(reinterpret_cast<i::Address>(object))->Print();
}
+extern void _v8_internal_Print_FeedbackMetadata(void* object) {
+ if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
+ printf("Not a feedback metadata object\n");
+ } else {
+ reinterpret_cast<i::FeedbackMetadata*>(object)->Print();
+ }
+}
+
extern void _v8_internal_Print_FeedbackVector(void* object) {
if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
printf("Not a feedback vector\n");
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 133f94387c..9f9a628062 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -18,6 +18,7 @@
#include "src/api-arguments-inl.h"
#include "src/api-natives.h"
#include "src/api.h"
+#include "src/arguments.h"
#include "src/base/bits.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
@@ -157,10 +158,6 @@ MaybeHandle<Object> Object::ConvertToNumber(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToNumber),
Object);
}
- if (input->IsSimd128Value()) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSimdToNumber),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(
isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
ToPrimitiveHint::kNumber),
@@ -244,9 +241,6 @@ MaybeHandle<String> Object::ConvertToString(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToString),
String);
}
- if (input->IsSimd128Value()) {
- return Simd128Value::ToString(Handle<Simd128Value>::cast(input));
- }
ASSIGN_RETURN_ON_EXCEPTION(
isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
ToPrimitiveHint::kString),
@@ -303,8 +297,7 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
Handle<Object> input) {
DisallowJavascriptExecution no_js(isolate);
- if (input->IsString() || input->IsNumber() || input->IsOddball() ||
- input->IsSimd128Value()) {
+ if (input->IsString() || input->IsNumber() || input->IsOddball()) {
return Object::ToString(isolate, input).ToHandleChecked();
} else if (input->IsFunction()) {
// -- F u n c t i o n
@@ -579,18 +572,6 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
} else {
return Just(false);
}
- } else if (x->IsSimd128Value()) {
- if (y->IsSimd128Value()) {
- return Just(Simd128Value::Equals(Handle<Simd128Value>::cast(x),
- Handle<Simd128Value>::cast(y)));
- } else if (y->IsJSReceiver()) {
- if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
- .ToHandle(&y)) {
- return Nothing<bool>();
- }
- } else {
- return Just(false);
- }
} else if (x->IsJSReceiver()) {
if (y->IsJSReceiver()) {
return Just(x.is_identical_to(y));
@@ -616,9 +597,6 @@ bool Object::StrictEquals(Object* that) {
} else if (this->IsString()) {
if (!that->IsString()) return false;
return String::cast(this)->Equals(String::cast(that));
- } else if (this->IsSimd128Value()) {
- if (!that->IsSimd128Value()) return false;
- return Simd128Value::cast(this)->Equals(Simd128Value::cast(that));
}
return this == that;
}
@@ -634,10 +612,6 @@ Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
if (object->IsString()) return isolate->factory()->string_string();
if (object->IsSymbol()) return isolate->factory()->symbol_string();
if (object->IsString()) return isolate->factory()->string_string();
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- if (object->Is##Type()) return isolate->factory()->type##_string();
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
if (object->IsCallable()) return isolate->factory()->function_string();
return isolate->factory()->object_string();
}
@@ -2003,8 +1977,22 @@ Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
namespace {
-MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> target,
- Handle<Object> source, bool use_set) {
+bool HasExcludedProperty(
+ const ScopedVector<Handle<Object>>* excluded_properties,
+ Handle<Object> search_element) {
+ // TODO(gsathya): Change this to be a hashtable.
+ for (int i = 0; i < excluded_properties->length(); i++) {
+ if (search_element->SameValue(*excluded_properties->at(i))) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+MUST_USE_RESULT Maybe<bool> FastAssign(
+ Handle<JSReceiver> target, Handle<Object> source,
+ const ScopedVector<Handle<Object>>* excluded_properties, bool use_set) {
// Non-empty strings are the only non-JSReceivers that need to be handled
// explicitly by Object.assign.
if (!source->IsJSReceiver()) {
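
HasExcludedProperty above is a linear SameValue scan, and the TODO(gsathya) suggests moving to a hashtable. A sketch of that direction, simplified to string keys (the real excluded keys are arbitrary V8 Objects compared with SameValue, so a faithful version would need a SameValue-compatible hash, which is presumably why the simple scan was kept for now):

    #include <string>
    #include <unordered_set>

    // Hypothetical hashtable variant: O(1) average membership test instead
    // of an O(n) scan per copied property.
    bool HasExcludedProperty(const std::unordered_set<std::string>& excluded,
                             const std::string& key) {
      return excluded.count(key) != 0;
    }
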
@@ -2077,6 +2065,11 @@ MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> target,
if (result.IsNothing()) return result;
if (stable && call_to_js) stable = from->map() == *map;
} else {
+ if (excluded_properties != nullptr &&
+ HasExcludedProperty(excluded_properties, next_key)) {
+ continue;
+ }
+
// 4a ii 2. Perform ? CreateDataProperty(target, nextKey, propValue).
bool success;
LookupIterator it = LookupIterator::PropertyOrElement(
@@ -2090,15 +2083,14 @@ MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> target,
return Just(true);
}
-
} // namespace
// static
-Maybe<bool> JSReceiver::SetOrCopyDataProperties(Isolate* isolate,
- Handle<JSReceiver> target,
- Handle<Object> source,
- bool use_set) {
- Maybe<bool> fast_assign = FastAssign(target, source, use_set);
+Maybe<bool> JSReceiver::SetOrCopyDataProperties(
+ Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
+ const ScopedVector<Handle<Object>>* excluded_properties, bool use_set) {
+ Maybe<bool> fast_assign =
+ FastAssign(target, source, excluded_properties, use_set);
if (fast_assign.IsNothing()) return Nothing<bool>();
if (fast_assign.FromJust()) return Just(true);
@@ -2135,6 +2127,11 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(Isolate* isolate,
isolate, target, next_key, prop_value, STRICT),
Nothing<bool>());
} else {
+ if (excluded_properties != nullptr &&
+ HasExcludedProperty(excluded_properties, next_key)) {
+ continue;
+ }
+
// 4a ii 2. Perform ! CreateDataProperty(target, nextKey, propValue).
bool success;
LookupIterator it = LookupIterator::PropertyOrElement(
@@ -2157,8 +2154,8 @@ Map* Object::GetPrototypeChainRootMap(Isolate* isolate) {
return native_context->number_function()->initial_map();
}
- // The object is either a number, a string, a symbol, a boolean, a SIMD value,
- // a real JS object, or a Harmony proxy.
+ // The object is either a number, a string, a symbol, a boolean, a real JS
+ // object, or a Harmony proxy.
HeapObject* heap_object = HeapObject::cast(this);
return heap_object->map()->GetPrototypeChainRootMap(isolate);
}
@@ -2184,8 +2181,8 @@ namespace {
// objects. This avoids a double lookup in the cases where we know we will
// add the hash to the JSObject if it does not already exist.
Object* GetSimpleHash(Object* object) {
- // The object is either a Smi, a HeapNumber, a name, an odd-ball,
- // a SIMD value type, a real JS object, or a Harmony proxy.
+ // The object is either a Smi, a HeapNumber, a name, an odd-ball, a real JS
+ // object, or a Harmony proxy.
if (object->IsSmi()) {
uint32_t hash =
ComputeIntegerHash(Smi::cast(object)->value(), kZeroHashSeed);
@@ -2209,10 +2206,6 @@ Object* GetSimpleHash(Object* object) {
uint32_t hash = Oddball::cast(object)->to_string()->Hash();
return Smi::FromInt(hash);
}
- if (object->IsSimd128Value()) {
- uint32_t hash = Simd128Value::cast(object)->Hash();
- return Smi::FromInt(hash & Smi::kMaxValue);
- }
DCHECK(object->IsJSReceiver());
// Simply return the receiver as it is guaranteed to not be a SMI.
return object;
@@ -2259,23 +2252,6 @@ bool Object::SameValue(Object* other) {
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
}
- if (IsFloat32x4() && other->IsFloat32x4()) {
- Float32x4* a = Float32x4::cast(this);
- Float32x4* b = Float32x4::cast(other);
- for (int i = 0; i < 4; i++) {
- float x = a->get_lane(i);
- float y = b->get_lane(i);
- // Implements the ES5 SameValue operation for floating point types.
- // http://www.ecma-international.org/ecma-262/6.0/#sec-samevalue
- if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
- if (std::signbit(x) != std::signbit(y)) return false;
- }
- return true;
- } else if (IsSimd128Value() && other->IsSimd128Value()) {
- Simd128Value* a = Simd128Value::cast(this);
- Simd128Value* b = Simd128Value::cast(other);
- return a->map() == b->map() && a->BitwiseEquals(b);
- }
return false;
}
@@ -2295,23 +2271,6 @@ bool Object::SameValueZero(Object* other) {
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
}
- if (IsFloat32x4() && other->IsFloat32x4()) {
- Float32x4* a = Float32x4::cast(this);
- Float32x4* b = Float32x4::cast(other);
- for (int i = 0; i < 4; i++) {
- float x = a->get_lane(i);
- float y = b->get_lane(i);
- // Implements the ES6 SameValueZero operation for floating point types.
- // http://www.ecma-international.org/ecma-262/6.0/#sec-samevaluezero
- if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
- // SameValueZero doesn't distinguish between 0 and -0.
- }
- return true;
- } else if (IsSimd128Value() && other->IsSimd128Value()) {
- Simd128Value* a = Simd128Value::cast(this);
- Simd128Value* b = Simd128Value::cast(other);
- return a->map() == b->map() && a->BitwiseEquals(b);
- }
return false;
}
@@ -2463,7 +2422,16 @@ Handle<String> String::SlowFlatten(Handle<ConsString> cons,
DCHECK(cons->second()->length() != 0);
// TurboFan can create cons strings with empty first parts.
- if (cons->first()->length() == 0) return handle(cons->second());
+ while (cons->first()->length() == 0) {
+ // We do not want to call this function recursively. Therefore we call
+ // String::Flatten only in those cases where String::SlowFlatten is not
+ // called again.
+ if (cons->second()->IsConsString() && !cons->second()->IsFlat()) {
+ cons = handle(ConsString::cast(cons->second()));
+ } else {
+ return String::Flatten(handle(cons->second()));
+ }
+ }
DCHECK(AllowHeapAllocation::IsAllowed());
Isolate* isolate = cons->GetIsolate();
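
The new loop in SlowFlatten above walks down chains of empty-first-part cons strings iteratively, and only hands off to String::Flatten when that call cannot re-enter SlowFlatten, keeping the C++ stack depth bounded. The control flow in a standalone miniature (hypothetical node type):

    struct Node {
      bool first_empty;
      bool second_needs_slow_path;  // stands in for "second is a non-flat cons"
      Node* second;
    };

    Node* Process(Node* n) { return n; }  // stands in for String::Flatten

    Node* Descend(Node* n) {
      while (n->first_empty) {
        if (n->second_needs_slow_path) {
          n = n->second;  // iterate instead of recursing
        } else {
          return Process(n->second);  // cannot bounce back into Descend
        }
      }
      return n;
    }
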
@@ -2514,7 +2482,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
Heap* heap = GetHeap();
bool is_one_byte = this->IsOneByteRepresentation();
bool is_internalized = this->IsInternalizedString();
- bool has_pointers = this->IsConsString() || this->IsSlicedString();
+ bool has_pointers = StringShape(this).IsIndirect();
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -2586,7 +2554,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
if (size < ExternalString::kShortSize) return false;
Heap* heap = GetHeap();
bool is_internalized = this->IsInternalizedString();
- bool has_pointers = this->IsConsString() || this->IsSlicedString();
+ bool has_pointers = StringShape(this).IsIndirect();
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -3053,17 +3021,6 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << '>';
break;
}
- case SIMD128_VALUE_TYPE: {
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- if (Is##Type()) { \
- os << "<" #Type ">"; \
- break; \
- }
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- UNREACHABLE();
- break;
- }
case JS_PROXY_TYPE:
os << "<JSProxy>";
break;
@@ -3154,101 +3111,6 @@ void HeapNumber::HeapNumberPrint(std::ostream& os) { // NOLINT
#define READ_BYTE_FIELD(p, offset) \
(*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
-
-// static
-Handle<String> Simd128Value::ToString(Handle<Simd128Value> input) {
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
- if (input->Is##Type()) return Type::ToString(Handle<Type>::cast(input));
- SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
- UNREACHABLE();
- return Handle<String>::null();
-}
-
-
-// static
-Handle<String> Float32x4::ToString(Handle<Float32x4> input) {
- Isolate* const isolate = input->GetIsolate();
- char arr[100];
- Vector<char> buffer(arr, arraysize(arr));
- std::ostringstream os;
- os << "SIMD.Float32x4("
- << std::string(DoubleToCString(input->get_lane(0), buffer)) << ", "
- << std::string(DoubleToCString(input->get_lane(1), buffer)) << ", "
- << std::string(DoubleToCString(input->get_lane(2), buffer)) << ", "
- << std::string(DoubleToCString(input->get_lane(3), buffer)) << ")";
- return isolate->factory()->NewStringFromAsciiChecked(os.str().c_str());
-}
-
-
-#define SIMD128_BOOL_TO_STRING(Type, lane_count) \
- Handle<String> Type::ToString(Handle<Type> input) { \
- Isolate* const isolate = input->GetIsolate(); \
- std::ostringstream os; \
- os << "SIMD." #Type "("; \
- os << (input->get_lane(0) ? "true" : "false"); \
- for (int i = 1; i < lane_count; i++) { \
- os << ", " << (input->get_lane(i) ? "true" : "false"); \
- } \
- os << ")"; \
- return isolate->factory()->NewStringFromAsciiChecked(os.str().c_str()); \
- }
-SIMD128_BOOL_TO_STRING(Bool32x4, 4)
-SIMD128_BOOL_TO_STRING(Bool16x8, 8)
-SIMD128_BOOL_TO_STRING(Bool8x16, 16)
-#undef SIMD128_BOOL_TO_STRING
-
-
-#define SIMD128_INT_TO_STRING(Type, lane_count) \
- Handle<String> Type::ToString(Handle<Type> input) { \
- Isolate* const isolate = input->GetIsolate(); \
- char arr[100]; \
- Vector<char> buffer(arr, arraysize(arr)); \
- std::ostringstream os; \
- os << "SIMD." #Type "("; \
- os << IntToCString(input->get_lane(0), buffer); \
- for (int i = 1; i < lane_count; i++) { \
- os << ", " << IntToCString(input->get_lane(i), buffer); \
- } \
- os << ")"; \
- return isolate->factory()->NewStringFromAsciiChecked(os.str().c_str()); \
- }
-SIMD128_INT_TO_STRING(Int32x4, 4)
-SIMD128_INT_TO_STRING(Uint32x4, 4)
-SIMD128_INT_TO_STRING(Int16x8, 8)
-SIMD128_INT_TO_STRING(Uint16x8, 8)
-SIMD128_INT_TO_STRING(Int8x16, 16)
-SIMD128_INT_TO_STRING(Uint8x16, 16)
-#undef SIMD128_INT_TO_STRING
-
-
-bool Simd128Value::BitwiseEquals(const Simd128Value* other) const {
- return READ_INT64_FIELD(this, kValueOffset) ==
- READ_INT64_FIELD(other, kValueOffset) &&
- READ_INT64_FIELD(this, kValueOffset + kInt64Size) ==
- READ_INT64_FIELD(other, kValueOffset + kInt64Size);
-}
-
-
-uint32_t Simd128Value::Hash() const {
- uint32_t seed = v8::internal::kZeroHashSeed;
- uint32_t hash;
- hash = ComputeIntegerHash(READ_INT32_FIELD(this, kValueOffset), seed);
- hash = ComputeIntegerHash(
- READ_INT32_FIELD(this, kValueOffset + 1 * kInt32Size), hash * 31);
- hash = ComputeIntegerHash(
- READ_INT32_FIELD(this, kValueOffset + 2 * kInt32Size), hash * 31);
- hash = ComputeIntegerHash(
- READ_INT32_FIELD(this, kValueOffset + 3 * kInt32Size), hash * 31);
- return hash;
-}
-
-
-void Simd128Value::CopyBits(void* destination) const {
- memcpy(destination, &READ_BYTE_FIELD(this, kValueOffset), kSimd128Size);
-}
-
-
String* JSReceiver::class_name() {
if (IsFunction()) {
return GetHeap()->Function_string();
@@ -3308,8 +3170,7 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
: result;
}
-
-Context* JSReceiver::GetCreationContext() {
+Handle<Context> JSReceiver::GetCreationContext() {
JSReceiver* receiver = this;
while (receiver->IsJSBoundFunction()) {
receiver = JSBoundFunction::cast(receiver)->bound_target_function();
@@ -3325,7 +3186,9 @@ Context* JSReceiver::GetCreationContext() {
function = JSFunction::cast(receiver);
}
- return function->context()->native_context();
+ return function->has_context()
+ ? Handle<Context>(function->context()->native_context())
+ : Handle<Context>::null();
}
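// Illustrative caller-side sketch (hypothetical, not part of the patch):
// since GetCreationContext() can now return a null handle, callers such as
// the MaybeHandle-returning GetFunctionRealm() below must check it first.
Handle<Context> creation_context = receiver->GetCreationContext();
if (creation_context.is_null()) return MaybeHandle<Context>();
return creation_context;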
Handle<Object> Map::WrapFieldType(Handle<FieldType> type) {
@@ -3345,6 +3208,7 @@ FieldType* Map::UnwrapFieldType(Object* wrapped_type) {
MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
Handle<FieldType> type,
PropertyAttributes attributes,
+ PropertyConstness constness,
Representation representation,
TransitionFlag flag) {
DCHECK(DescriptorArray::kNotFound ==
@@ -3368,8 +3232,9 @@ MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
Handle<Object> wrapped_type(WrapFieldType(type));
- Descriptor d = Descriptor::DataField(name, index, wrapped_type, attributes,
- representation);
+ DCHECK_IMPLIES(!FLAG_track_constant_fields, constness == kMutable);
+ Descriptor d = Descriptor::DataField(name, index, attributes, constness,
+ representation, wrapped_type);
Handle<Map> new_map = Map::CopyAddDescriptor(map, &d, flag);
int unused_property_fields = new_map->unused_property_fields() - 1;
if (unused_property_fields < 0) {
@@ -3390,9 +3255,18 @@ MaybeHandle<Map> Map::CopyWithConstant(Handle<Map> map,
return MaybeHandle<Map>();
}
- // Allocate new instance descriptors with (name, constant) added.
- Descriptor d = Descriptor::DataConstant(name, constant, attributes);
- return Map::CopyAddDescriptor(map, &d, flag);
+ if (FLAG_track_constant_fields) {
+ Isolate* isolate = map->GetIsolate();
+ Representation representation = constant->OptimalRepresentation();
+ Handle<FieldType> type = constant->OptimalType(isolate, representation);
+ return CopyWithField(map, name, type, attributes, kConst, representation,
+ flag);
+ } else {
+ // Allocate new instance descriptors with (name, constant) added.
+ Descriptor d = Descriptor::DataConstant(name, 0, constant, attributes);
+ Handle<Map> new_map = Map::CopyAddDescriptor(map, &d, flag);
+ return new_map;
+ }
}
const char* Representation::Mnemonic() const {
@@ -3410,6 +3284,34 @@ const char* Representation::Mnemonic() const {
}
}
+bool Map::TransitionRemovesTaggedField(Map* target) {
+ int inobject = GetInObjectProperties();
+ int target_inobject = target->GetInObjectProperties();
+ for (int i = target_inobject; i < inobject; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(this, i);
+ if (!IsUnboxedDoubleField(index)) return true;
+ }
+ return false;
+}
+
+bool Map::TransitionChangesTaggedFieldToUntaggedField(Map* target) {
+ int inobject = GetInObjectProperties();
+ int target_inobject = target->GetInObjectProperties();
+ int limit = Min(inobject, target_inobject);
+ for (int i = 0; i < limit; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(target, i);
+ if (!IsUnboxedDoubleField(index) && target->IsUnboxedDoubleField(index)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Map::TransitionRequiresSynchronizationWithGC(Map* target) {
+ return TransitionRemovesTaggedField(target) ||
+ TransitionChangesTaggedFieldToUntaggedField(target);
+}
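// Hypothetical guard sketch (this patch calls NotifyObjectLayoutChange
// unconditionally in the migrations below): the predicates above would let
// a caller fence only those transitions that can leave raw double bits
// where a concurrent marker expects a tagged pointer.
if (old_map->TransitionRequiresSynchronizationWithGC(*new_map)) {
  heap->NotifyObjectLayoutChange(*object, no_allocation);
}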
+
bool Map::InstancesNeedRewriting(Map* target) {
int target_number_of_fields = target->NumberOfFields();
int target_inobject = target->GetInObjectProperties();
@@ -3618,9 +3520,9 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
DCHECK_EQ(kField, old_details.location());
FieldIndex index = FieldIndex::ForDescriptor(*old_map, i);
if (object->IsUnboxedDoubleField(index)) {
- double old = object->RawFastDoublePropertyAt(index);
- value = isolate->factory()->NewHeapNumber(
- old, representation.IsDouble() ? MUTABLE : IMMUTABLE);
+ uint64_t old_bits = object->RawFastDoublePropertyAsBitsAt(index);
+ value = isolate->factory()->NewHeapNumberFromBits(
+ old_bits, representation.IsDouble() ? MUTABLE : IMMUTABLE);
} else {
value = handle(object->RawFastPropertyAt(index), isolate);
@@ -3660,6 +3562,8 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Heap* heap = isolate->heap();
+ heap->NotifyObjectLayoutChange(*object, no_allocation);
+
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
int limit = Min(inobject, number_of_fields);
@@ -3670,8 +3574,9 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// yet.
if (new_map->IsUnboxedDoubleField(index)) {
DCHECK(value->IsMutableHeapNumber());
- object->RawFastDoublePropertyAtPut(index,
- HeapNumber::cast(value)->value());
+ // Ensure that all bits of the double value are preserved.
+ object->RawFastDoublePropertyAsBitsAtPut(
+ index, HeapNumber::cast(value)->value_as_bits());
if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
// Transition from tagged to untagged slot.
heap->ClearRecordedSlot(*object,
@@ -3773,13 +3678,15 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
+ Heap* heap = isolate->heap();
+ heap->NotifyObjectLayoutChange(*object, no_allocation);
+
// Resize the object in the heap if necessary.
int new_instance_size = new_map->instance_size();
int instance_size_delta = map->instance_size() - new_instance_size;
DCHECK(instance_size_delta >= 0);
if (instance_size_delta > 0) {
- Heap* heap = isolate->heap();
heap->CreateFillerObjectAt(object->address() + new_instance_size,
instance_size_delta, ClearRecordedSlots::kYes);
heap->AdjustLiveBytes(*object, -instance_size_delta);
@@ -3899,6 +3806,7 @@ void DescriptorArray::GeneralizeAllFields() {
details = details.CopyWithRepresentation(Representation::Tagged());
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
+ details = details.CopyWithConstness(kMutable);
SetValue(i, FieldType::Any());
}
set(ToDetailsIndex(i), details.AsSmi());
@@ -3926,7 +3834,8 @@ Handle<Map> Map::CopyGeneralizeAllFields(Handle<Map> map,
// Unless the instance is being migrated, ensure that modify_index is a field.
if (modify_index >= 0) {
PropertyDetails details = descriptors->GetDetails(modify_index);
- if (details.location() != kField || details.attributes() != attributes) {
+ if (details.constness() != kMutable || details.location() != kField ||
+ details.attributes() != attributes) {
int field_index = details.location() == kField
? details.field_index()
: new_map->NumberOfFields();
@@ -4035,8 +3944,8 @@ Map* Map::FindFieldOwner(int descriptor) {
return result;
}
-
void Map::UpdateFieldType(int descriptor, Handle<Name> name,
+ PropertyConstness new_constness,
Representation new_representation,
Handle<Object> new_wrapped_type) {
DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeakCell());
@@ -4063,15 +3972,19 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
DescriptorArray* descriptors = current->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
+  // Currently, a constness change implies a map change.
+ DCHECK_EQ(new_constness, details.constness());
+
// It is allowed to change representation here only from None to something.
DCHECK(details.representation().Equals(new_representation) ||
details.representation().IsNone());
// Skip if already updated the shared descriptor.
if (descriptors->GetValue(descriptor) != *new_wrapped_type) {
+ DCHECK_IMPLIES(!FLAG_track_constant_fields, new_constness == kMutable);
Descriptor d = Descriptor::DataField(
- name, descriptors->GetFieldIndex(descriptor), new_wrapped_type,
- details.attributes(), new_representation);
+ name, descriptors->GetFieldIndex(descriptor), details.attributes(),
+ new_constness, new_representation, new_wrapped_type);
descriptors->Replace(descriptor, &d);
}
}
@@ -4102,18 +4015,21 @@ Handle<FieldType> Map::GeneralizeFieldType(Representation rep1,
// static
void Map::GeneralizeField(Handle<Map> map, int modify_index,
+ PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type) {
Isolate* isolate = map->GetIsolate();
// Check if we actually need to generalize the field type at all.
Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
- Representation old_representation =
- old_descriptors->GetDetails(modify_index).representation();
+ PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+ PropertyConstness old_constness = old_details.constness();
+ Representation old_representation = old_details.representation();
Handle<FieldType> old_field_type(old_descriptors->GetFieldType(modify_index),
isolate);
- if (old_representation.Equals(new_representation) &&
+ if (old_constness == new_constness &&
+ old_representation.Equals(new_representation) &&
!FieldTypeIsCleared(new_representation, *new_field_type) &&
// Checking old_field_type for being cleared is not necessary because
// the NowIs check below would fail anyway in that case.
@@ -4138,8 +4054,8 @@ void Map::GeneralizeField(Handle<Map> map, int modify_index,
Handle<Name> name(descriptors->GetKey(modify_index));
Handle<Object> wrapped_type(WrapFieldType(new_field_type));
- field_owner->UpdateFieldType(modify_index, name, new_representation,
- wrapped_type);
+ field_owner->UpdateFieldType(modify_index, name, new_constness,
+ new_representation, wrapped_type);
field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kFieldOwnerGroup);
@@ -4161,7 +4077,7 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> map, int modify_index,
Handle<FieldType> new_field_type) {
DCHECK_EQ(kData, new_kind); // Only kData case is supported.
MapUpdater mu(map->GetIsolate(), map);
- return mu.ReconfigureToDataField(modify_index, new_attributes,
+ return mu.ReconfigureToDataField(modify_index, new_attributes, kConst,
new_representation, new_field_type);
}
@@ -4184,7 +4100,7 @@ Handle<Map> Map::GeneralizeAllFields(Handle<Map> map) {
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
MapUpdater mu(isolate, map);
- map = mu.ReconfigureToDataField(i, details.attributes(),
+ map = mu.ReconfigureToDataField(i, details.attributes(), kMutable,
Representation::Tagged(), any_type);
}
}
@@ -4238,6 +4154,10 @@ Map* Map::TryReplayPropertyTransitions(Map* old_map) {
PropertyDetails new_details = new_descriptors->GetDetails(i);
DCHECK_EQ(old_details.kind(), new_details.kind());
DCHECK_EQ(old_details.attributes(), new_details.attributes());
+ if (!IsGeneralizableTo(old_details.constness(), new_details.constness())) {
+ return nullptr;
+ }
+ DCHECK(IsGeneralizableTo(old_details.location(), new_details.location()));
if (!old_details.representation().fits_into(new_details.representation())) {
return nullptr;
}
@@ -4258,6 +4178,7 @@ Map* Map::TryReplayPropertyTransitions(Map* old_map) {
}
} else {
DCHECK_EQ(kDescriptor, old_details.location());
+ DCHECK(!FLAG_track_constant_fields);
Object* old_value = old_descriptors->GetValue(i);
if (!new_type->NowContains(old_value)) {
return nullptr;
@@ -4580,7 +4501,7 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
it->PrepareForDataProperty(to_assign);
// Write the property value.
- it->WriteDataValue(to_assign);
+ it->WriteDataValue(to_assign, false);
#if VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -4654,7 +4575,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
it->ApplyTransitionToDataProperty(receiver);
// Write the property value.
- it->WriteDataValue(value);
+ it->WriteDataValue(value, true);
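// Hedged reading of the new boolean argument: true here in AddDataProperty
// marks an initializing store of a fresh property, while the false passed
// in SetDataProperty above marks an overwrite of an existing value; the
// distinction feeds the constant-field tracking introduced in this patch.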
#if VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -5327,7 +5248,7 @@ Handle<Context> JSFunction::GetFunctionRealm(Handle<JSFunction> function) {
MaybeHandle<Context> JSObject::GetFunctionRealm(Handle<JSObject> object) {
DCHECK(object->map()->is_constructor());
DCHECK(!object->IsJSFunction());
- return handle(object->GetCreationContext());
+ return object->GetCreationContext();
}
@@ -5688,9 +5609,13 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
PropertyKind kind = dictionary->DetailsAt(index).kind();
if (kind == kData) {
- Object* value = dictionary->ValueAt(index);
- if (!value->IsJSFunction()) {
+ if (FLAG_track_constant_fields) {
number_of_fields += 1;
+ } else {
+ Object* value = dictionary->ValueAt(index);
+ if (!value->IsJSFunction()) {
+ number_of_fields += 1;
+ }
}
}
}
@@ -5755,18 +5680,20 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Object* value = dictionary->ValueAt(index);
PropertyDetails details = dictionary->DetailsAt(index);
+ DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(kMutable, details.constness());
int enumeration_index = details.dictionary_index();
Descriptor d;
if (details.kind() == kData) {
- if (value->IsJSFunction()) {
+ if (!FLAG_track_constant_fields && value->IsJSFunction()) {
d = Descriptor::DataConstant(key, handle(value, isolate),
details.attributes());
} else {
d = Descriptor::DataField(
- key, current_offset, details.attributes(),
+ key, current_offset, details.attributes(), kDefaultFieldConstness,
// TODO(verwaest): value->OptimalRepresentation();
- Representation::Tagged());
+ Representation::Tagged(), FieldType::Any(isolate));
}
} else {
DCHECK_EQ(kAccessor, details.kind());
@@ -6648,7 +6575,6 @@ bool PropertyKeyToArrayLength(Handle<Object> value, uint32_t* length) {
return false;
}
-
bool PropertyKeyToArrayIndex(Handle<Object> index_obj, uint32_t* output) {
return PropertyKeyToArrayLength(index_obj, output) && *output != kMaxUInt32;
}
@@ -6978,7 +6904,7 @@ Maybe<bool> JSProxy::SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
if (it.IsFound()) {
DCHECK_EQ(LookupIterator::DATA, it.state());
DCHECK_EQ(DONT_ENUM, it.property_attributes());
- it.WriteDataValue(value);
+ it.WriteDataValue(value, false);
return Just(true);
}
@@ -7928,8 +7854,9 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
if (object->IsUnboxedDoubleField(index)) {
if (copying) {
- double value = object->RawFastDoublePropertyAt(index);
- copy->RawFastDoublePropertyAtPut(index, value);
+ // Ensure that all bits of the double value are preserved.
+ uint64_t value = object->RawFastDoublePropertyAsBitsAt(index);
+ copy->RawFastDoublePropertyAsBitsAtPut(index, value);
}
} else {
Handle<Object> value(object->RawFastPropertyAt(index), isolate);
@@ -9137,21 +9064,24 @@ Handle<Map> Map::CopyForPreventExtensions(Handle<Map> map,
namespace {
-bool CanHoldValue(DescriptorArray* descriptors, int descriptor, Object* value) {
+bool CanHoldValue(DescriptorArray* descriptors, int descriptor,
+ PropertyConstness constness, Object* value) {
PropertyDetails details = descriptors->GetDetails(descriptor);
if (details.location() == kField) {
if (details.kind() == kData) {
- return value->FitsRepresentation(details.representation()) &&
+ return IsGeneralizableTo(constness, details.constness()) &&
+ value->FitsRepresentation(details.representation()) &&
descriptors->GetFieldType(descriptor)->NowContains(value);
} else {
DCHECK_EQ(kAccessor, details.kind());
- UNREACHABLE();
return false;
}
} else {
DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(kConst, details.constness());
if (details.kind() == kData) {
+ DCHECK(!FLAG_track_constant_fields);
DCHECK(descriptors->GetValue(descriptor) != value ||
value->FitsRepresentation(details.representation()));
return descriptors->GetValue(descriptor) == value;
@@ -9165,8 +9095,12 @@ bool CanHoldValue(DescriptorArray* descriptors, int descriptor, Object* value) {
}
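// Sketch of the constness lattice, inferred from the IsGeneralizableTo()
// uses in this patch (an assumption, not stated explicitly here):
//   IsGeneralizableTo(kConst, kConst)    -> true
//   IsGeneralizableTo(kConst, kMutable)  -> true   (kMutable is more general)
//   IsGeneralizableTo(kMutable, kConst)  -> false  (forces a map update)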
Handle<Map> UpdateDescriptorForValue(Handle<Map> map, int descriptor,
+ PropertyConstness constness,
Handle<Object> value) {
- if (CanHoldValue(map->instance_descriptors(), descriptor, *value)) return map;
+ if (CanHoldValue(map->instance_descriptors(), descriptor, constness,
+ *value)) {
+ return map;
+ }
Isolate* isolate = map->GetIsolate();
PropertyAttributes attributes =
@@ -9175,25 +9109,26 @@ Handle<Map> UpdateDescriptorForValue(Handle<Map> map, int descriptor,
Handle<FieldType> type = value->OptimalType(isolate, representation);
MapUpdater mu(isolate, map);
- return mu.ReconfigureToDataField(descriptor, attributes, representation,
- type);
+ return mu.ReconfigureToDataField(descriptor, attributes, constness,
+ representation, type);
}
} // namespace
// static
Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
+ PropertyConstness constness,
Handle<Object> value) {
// Dictionaries can store any property value.
DCHECK(!map->is_dictionary_map());
// Update to the newest map before storing the property.
- return UpdateDescriptorForValue(Update(map), descriptor, value);
+ return UpdateDescriptorForValue(Update(map), descriptor, constness, value);
}
-
Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
+ PropertyConstness constness,
StoreFromKeyed store_mode) {
RuntimeCallTimerScope stats_scope(
*map, map->is_prototype_map()
@@ -9216,19 +9151,19 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
->GetDetails(descriptor)
.attributes());
- return UpdateDescriptorForValue(transition, descriptor, value);
+ return UpdateDescriptorForValue(transition, descriptor, constness, value);
}
TransitionFlag flag = INSERT_TRANSITION;
MaybeHandle<Map> maybe_map;
- if (value->IsJSFunction()) {
+ if (!FLAG_track_constant_fields && value->IsJSFunction()) {
maybe_map = Map::CopyWithConstant(map, name, value, attributes, flag);
} else if (!map->TooManyFastProperties(store_mode)) {
Isolate* isolate = name->GetIsolate();
Representation representation = value->OptimalRepresentation();
Handle<FieldType> type = value->OptimalType(isolate, representation);
- maybe_map =
- Map::CopyWithField(map, name, type, attributes, representation, flag);
+ maybe_map = Map::CopyWithField(map, name, type, attributes, constness,
+ representation, flag);
}
Handle<Map> result;
@@ -9273,7 +9208,8 @@ Handle<Map> Map::ReconfigureExistingProperty(Handle<Map> map, int descriptor,
MapUpdater mu(isolate, map);
DCHECK_EQ(kData, kind); // Only kData case is supported so far.
Handle<Map> new_map = mu.ReconfigureToDataField(
- descriptor, attributes, Representation::None(), FieldType::None(isolate));
+ descriptor, attributes, kDefaultFieldConstness, Representation::None(),
+ FieldType::None(isolate));
return new_map;
}
@@ -9810,7 +9746,6 @@ void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
}
}
-
#ifdef DEBUG
bool FixedArray::IsEqualTo(FixedArray* other) {
if (length() != other->length()) return false;
@@ -9991,6 +9926,12 @@ Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
return array;
}
+Handle<ArrayList> ArrayList::New(Isolate* isolate, int size) {
+ Handle<ArrayList> result = Handle<ArrayList>::cast(
+ isolate->factory()->NewFixedArray(size + kFirstIndex));
+ result->SetLength(0);
+ return result;
+}
bool ArrayList::IsFull() {
int capacity = length();
@@ -10237,28 +10178,6 @@ SharedFunctionInfo* DeoptimizationInputData::GetInlinedFunction(int index) {
}
}
-const int LiteralsArray::kFeedbackVectorOffset =
- LiteralsArray::OffsetOfElementAt(LiteralsArray::kVectorIndex);
-
-const int LiteralsArray::kOffsetToFirstLiteral =
- LiteralsArray::OffsetOfElementAt(LiteralsArray::kFirstLiteralIndex);
-
-// static
-Handle<LiteralsArray> LiteralsArray::New(Isolate* isolate,
- Handle<FeedbackVector> vector,
- int number_of_literals,
- PretenureFlag pretenure) {
- if (vector->is_empty() && number_of_literals == 0) {
- return Handle<LiteralsArray>::cast(
- isolate->factory()->empty_literals_array());
- }
- Handle<FixedArray> literals = isolate->factory()->NewFixedArray(
- number_of_literals + kFirstLiteralIndex, pretenure);
- Handle<LiteralsArray> casted_literals = Handle<LiteralsArray>::cast(literals);
- casted_literals->set_feedback_vector(*vector);
- return casted_literals;
-}
-
int HandlerTable::LookupRange(int pc_offset, int* data_out,
CatchPrediction* prediction_out) {
int innermost_handler = -1;
@@ -10345,10 +10264,7 @@ Handle<String> String::Trim(Handle<String> string, TrimMode mode) {
return isolate->factory()->NewSubString(string, left, right);
}
-bool String::LooksValid() {
- if (!GetIsolate()->heap()->Contains(this)) return false;
- return true;
-}
+bool String::LooksValid() { return GetIsolate()->heap()->Contains(this); }
// static
MaybeHandle<String> Name::ToFunctionName(Handle<Name> name) {
@@ -10482,8 +10398,7 @@ String::FlatContent String::GetFlatContent() {
}
string = cons->first();
shape = StringShape(string);
- }
- if (shape.representation_tag() == kSlicedStringTag) {
+ } else if (shape.representation_tag() == kSlicedStringTag) {
SlicedString* slice = SlicedString::cast(string);
offset = slice->offset();
string = slice->parent();
@@ -10491,6 +10406,13 @@ String::FlatContent String::GetFlatContent() {
DCHECK(shape.representation_tag() != kConsStringTag &&
shape.representation_tag() != kSlicedStringTag);
}
+ if (shape.representation_tag() == kThinStringTag) {
+ ThinString* thin = ThinString::cast(string);
+ string = thin->actual();
+ shape = StringShape(string);
+ DCHECK(!shape.IsCons());
+ DCHECK(!shape.IsSliced());
+ }
if (shape.encoding_tag() == kOneByteStringTag) {
const uint8_t* start;
if (shape.representation_tag() == kSeqStringTag) {
@@ -10576,6 +10498,7 @@ const uc16* String::GetTwoByteData(unsigned start) {
return slice->parent()->GetTwoByteData(start + slice->offset());
}
case kConsStringTag:
+ case kThinStringTag:
UNREACHABLE();
return NULL;
}
@@ -10842,6 +10765,7 @@ uint16_t ConsString::ConsStringGet(int index) {
return 0;
}
+uint16_t ThinString::ThinStringGet(int index) { return actual()->Get(index); }
uint16_t SlicedString::SlicedStringGet(int index) {
return parent()->Get(offset() + index);
@@ -10936,6 +10860,10 @@ void String::WriteToFlat(String* src,
WriteToFlat(slice->parent(), sink, from + offset, to + offset);
return;
}
+ case kOneByteStringTag | kThinStringTag:
+ case kTwoByteStringTag | kThinStringTag:
+ source = ThinString::cast(source)->actual();
+ break;
}
}
}
@@ -11157,6 +11085,17 @@ bool String::SlowEquals(String* other) {
if (len != other->length()) return false;
if (len == 0) return true;
+ // Fast check: if at least one ThinString is involved, dereference it/them
+ // and restart.
+ if (this->IsThinString() || other->IsThinString()) {
+ if (other->IsThinString()) other = ThinString::cast(other)->actual();
+ if (this->IsThinString()) {
+ return ThinString::cast(this)->actual()->Equals(other);
+ } else {
+ return this->Equals(other);
+ }
+ }
+
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
if (HasHashCode() && other->HasHashCode()) {
@@ -11198,6 +11137,14 @@ bool String::SlowEquals(Handle<String> one, Handle<String> two) {
if (one_length != two->length()) return false;
if (one_length == 0) return true;
+ // Fast check: if at least one ThinString is involved, dereference it/them
+ // and restart.
+ if (one->IsThinString() || two->IsThinString()) {
+ if (one->IsThinString()) one = handle(ThinString::cast(*one)->actual());
+ if (two->IsThinString()) two = handle(ThinString::cast(*two)->actual());
+ return String::Equals(one, two);
+ }
+
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
if (one->HasHashCode() && two->HasHashCode()) {
@@ -11676,6 +11623,9 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
+ Heap* heap = string->GetHeap();
+ if (new_length == 0) return heap->isolate()->factory()->empty_string();
+
int new_size, old_size;
int old_length = string->length();
if (old_length <= new_length) return string;
@@ -11695,7 +11645,6 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
DCHECK_OBJECT_ALIGNED(start_of_string);
DCHECK_OBJECT_ALIGNED(start_of_string + new_size);
- Heap* heap = string->GetHeap();
// Sizes are pointer size aligned, so that we can use filler objects
// that are a multiple of pointer size.
heap->CreateFillerObjectAt(start_of_string + new_size, delta,
@@ -11706,7 +11655,6 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
// for the left-over space to avoid races with the sweeper thread.
string->synchronized_set_length(new_length);
- if (new_length == 0) return heap->isolate()->factory()->empty_string();
return string;
}
@@ -11955,47 +11903,19 @@ void JSFunction::AttemptConcurrentOptimization() {
}
// static
-Handle<LiteralsArray> SharedFunctionInfo::FindOrCreateLiterals(
- Handle<SharedFunctionInfo> shared, Handle<Context> native_context) {
- Isolate* isolate = shared->GetIsolate();
- CodeAndLiterals result =
- shared->SearchOptimizedCodeMap(*native_context, BailoutId::None());
- if (result.literals != nullptr) {
- DCHECK(shared->feedback_metadata()->is_empty() ||
- !result.literals->feedback_vector()->is_empty());
- return handle(result.literals, isolate);
- }
-
- Handle<FeedbackVector> feedback_vector =
- FeedbackVector::New(isolate, handle(shared->feedback_metadata()));
- Handle<LiteralsArray> literals =
- LiteralsArray::New(isolate, feedback_vector, shared->num_literals());
- Handle<Code> code;
- if (result.code != nullptr) {
- code = Handle<Code>(result.code, isolate);
- }
- AddToOptimizedCodeMap(shared, native_context, code, literals,
- BailoutId::None());
- return literals;
-}
-
-// static
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
- MaybeHandle<Code> code, Handle<LiteralsArray> literals,
- BailoutId osr_ast_id) {
+ Handle<Code> code, BailoutId osr_ast_id) {
Isolate* isolate = shared->GetIsolate();
if (isolate->serializer_enabled()) return;
- DCHECK(code.is_null() ||
- code.ToHandleChecked()->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(native_context->IsNativeContext());
- STATIC_ASSERT(kEntryLength == 3);
+ STATIC_ASSERT(kEntryLength == 2);
Handle<FixedArray> new_code_map;
int entry;
if (!osr_ast_id.IsNone()) {
- Context::AddToOptimizedCodeMap(
- native_context, shared, code.ToHandleChecked(), literals, osr_ast_id);
+ Context::AddToOptimizedCodeMap(native_context, shared, code, osr_ast_id);
return;
}
@@ -12007,15 +11927,9 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
entry = shared->SearchOptimizedCodeMapEntry(*native_context);
if (entry >= kEntriesStart) {
- // Just set the code and literals of the entry.
- if (!code.is_null()) {
- Handle<WeakCell> code_cell =
- isolate->factory()->NewWeakCell(code.ToHandleChecked());
- old_code_map->set(entry + kCachedCodeOffset, *code_cell);
- }
- Handle<WeakCell> literals_cell =
- isolate->factory()->NewWeakCell(literals);
- old_code_map->set(entry + kLiteralsOffset, *literals_cell);
+ // Just set the code of the entry.
+ Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
+ old_code_map->set(entry + kCachedCodeOffset, *code_cell);
return;
}
@@ -12043,15 +11957,11 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
}
}
- Handle<WeakCell> code_cell =
- code.is_null() ? isolate->factory()->empty_weak_cell()
- : isolate->factory()->NewWeakCell(code.ToHandleChecked());
- Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
+ Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
WeakCell* context_cell = native_context->self_weak_cell();
new_code_map->set(entry + kContextOffset, context_cell);
new_code_map->set(entry + kCachedCodeOffset, *code_cell);
- new_code_map->set(entry + kLiteralsOffset, *literals_cell);
#ifdef DEBUG
for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
@@ -12061,8 +11971,6 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
DCHECK(cell->cleared() ||
(cell->value()->IsCode() &&
Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
- cell = WeakCell::cast(new_code_map->get(i + kLiteralsOffset));
- DCHECK(cell->cleared() || cell->value()->IsFixedArray());
}
#endif
@@ -12100,7 +12008,7 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
ShortPrint();
PrintF("]\n");
}
- // Just clear the code in order to continue sharing literals.
+ // Just clear the code.
code_map->set(src + kCachedCodeOffset, heap->empty_weak_cell(),
SKIP_WRITE_BARRIER);
}
@@ -12116,12 +12024,28 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
// static
void JSFunction::EnsureLiterals(Handle<JSFunction> function) {
Handle<SharedFunctionInfo> shared(function->shared());
- Handle<Context> native_context(function->context()->native_context());
- if (function->literals() ==
- function->GetIsolate()->heap()->empty_literals_array()) {
- Handle<LiteralsArray> literals =
- SharedFunctionInfo::FindOrCreateLiterals(shared, native_context);
- function->set_literals(*literals);
+ Isolate* isolate = shared->GetIsolate();
+
+ FeedbackVectorState state = function->GetFeedbackVectorState(isolate);
+ switch (state) {
+ case TOP_LEVEL_SCRIPT_NEEDS_VECTOR: {
+      // A top-level script didn't get its literals installed.
+ Handle<FeedbackVector> feedback_vector =
+ FeedbackVector::New(isolate, shared);
+ Handle<Cell> new_cell =
+ isolate->factory()->NewOneClosureCell(feedback_vector);
+ function->set_feedback_vector_cell(*new_cell);
+ break;
+ }
+ case NEEDS_VECTOR: {
+ Handle<FeedbackVector> feedback_vector =
+ FeedbackVector::New(isolate, shared);
+ function->feedback_vector_cell()->set_value(*feedback_vector);
+ break;
+ }
+ case HAS_VECTOR:
+ // Nothing to do.
+ break;
}
}
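// Condensed restatement of the state machine above (names from the switch):
//   TOP_LEVEL_SCRIPT_NEEDS_VECTOR -> allocate a vector plus a fresh
//     one-closure cell and install both on the function;
//   NEEDS_VECTOR -> a shared cell already exists, only the vector is new;
//   HAS_VECTOR -> nothing to do.
// Post-condition in all three cases: feedback_vector_cell() holds a vector.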
@@ -12640,6 +12564,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_API_OBJECT_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_DATA_VIEW_TYPE:
case JS_DATE_TYPE:
@@ -12681,7 +12606,6 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case ODDBALL_TYPE:
case PROPERTY_CELL_TYPE:
case SHARED_FUNCTION_INFO_TYPE:
- case SIMD128_VALUE_TYPE:
case SYMBOL_TYPE:
case WEAK_CELL_TYPE:
@@ -12946,6 +12870,10 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
return NativeCodeFunctionSourceString(shared_info);
}
+ if (FLAG_harmony_function_tostring) {
+ return Handle<String>::cast(shared_info->GetSourceCodeHarmony());
+ }
+
IncrementalStringBuilder builder(isolate);
FunctionKind kind = shared_info->kind();
if (!IsArrowFunction(kind)) {
@@ -12997,7 +12925,7 @@ void Script::SetEvalOrigin(Handle<Script> script,
// position, but store it as negative value for lazy translation.
StackTraceFrameIterator it(script->GetIsolate());
if (!it.done() && it.is_javascript()) {
- FrameSummary summary = FrameSummary::GetFirst(it.javascript_frame());
+ FrameSummary summary = FrameSummary::GetTop(it.javascript_frame());
script->set_eval_from_shared(summary.AsJavaScript().function()->shared());
script->set_eval_from_position(-summary.code_offset());
return;
@@ -13224,7 +13152,7 @@ Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
}
MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
- Isolate* isolate, FunctionLiteral* fun) {
+ Isolate* isolate, const FunctionLiteral* fun) {
DCHECK_NE(fun->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
DCHECK_LT(fun->function_literal_id(), shared_function_infos()->length());
Object* shared = shared_function_infos()->get(fun->function_literal_id());
@@ -13416,6 +13344,15 @@ Handle<Object> SharedFunctionInfo::GetSourceCode() {
source, start_position(), end_position());
}
+Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony() {
+ Isolate* isolate = GetIsolate();
+ if (!HasSourceCode()) return isolate->factory()->undefined_value();
+ Handle<String> script_source(String::cast(Script::cast(script())->source()));
+ int start_pos = function_token_position();
+ if (start_pos == kNoSourcePosition) start_pos = start_position();
+ return isolate->factory()->NewSubString(script_source, start_pos,
+ end_position());
+}
bool SharedFunctionInfo::IsInlineable() {
// Check that the function has a script associated with it.
@@ -13701,15 +13638,11 @@ void SharedFunctionInfo::ClearCodeFromOptimizedCodeMap() {
}
}
-CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
- Context* native_context, BailoutId osr_ast_id) {
- CodeAndLiterals result = {nullptr, nullptr};
+Code* SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
+ BailoutId osr_ast_id) {
+ Code* result = nullptr;
if (!osr_ast_id.IsNone()) {
- Code* code;
- LiteralsArray* literals;
- native_context->SearchOptimizedCodeMap(this, osr_ast_id, &code, &literals);
- result = {code, literals};
- return result;
+ return native_context->SearchOptimizedCodeMap(this, osr_ast_id);
}
DCHECK(osr_ast_id.IsNone());
@@ -13718,12 +13651,7 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
FixedArray* code_map = optimized_code_map();
DCHECK_LE(entry + kEntryLength, code_map->length());
WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
- WeakCell* literals_cell =
- WeakCell::cast(code_map->get(entry + kLiteralsOffset));
-
- result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
- literals_cell->cleared() ? nullptr : LiteralsArray::cast(
- literals_cell->value())};
+ result = cell->cleared() ? nullptr : Code::cast(cell->value());
}
return result;
}
@@ -13968,7 +13896,8 @@ void Code::ClearInlineCaches() {
RelocInfo* info = it.rinfo();
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
if (target->is_inline_cache_stub()) {
- IC::Clear(this->GetIsolate(), info->pc(), info->host()->constant_pool());
+ ICUtility::Clear(this->GetIsolate(), info->pc(),
+ info->host()->constant_pool());
}
}
}
@@ -14003,11 +13932,10 @@ int AbstractCode::SourceStatementPosition(int offset) {
}
void JSFunction::ClearTypeFeedbackInfo() {
- feedback_vector()->ClearSlots(shared());
-}
-
-void JSFunction::ClearTypeFeedbackInfoAtGCTime() {
- feedback_vector()->ClearSlotsAtGCTime(shared());
+ if (feedback_vector_cell()->value()->IsFeedbackVector()) {
+ FeedbackVector* vector = feedback_vector();
+ vector->ClearSlots(this);
+ }
}
BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
@@ -14350,14 +14278,24 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
break;
}
+ case Translation::CONSTRUCT_STUB_FRAME: {
+ int bailout_id = iterator.Next();
+ int shared_info_id = iterator.Next();
+ Object* shared_info = LiteralArray()->get(shared_info_id);
+ unsigned height = iterator.Next();
+ os << "{bailout_id=" << bailout_id << ", function="
+ << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << ", height=" << height << "}";
+ break;
+ }
+
case Translation::COMPILED_STUB_FRAME: {
Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
os << "{kind=" << stub_kind << "}";
break;
}
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME: {
+ case Translation::ARGUMENTS_ADAPTOR_FRAME: {
int shared_info_id = iterator.Next();
Object* shared_info = LiteralArray()->get(shared_info_id);
unsigned height = iterator.Next();
@@ -14544,8 +14482,8 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
if (!IC::ICUseVector(kind())) {
InlineCacheState ic_state = IC::StateFromCode(this);
os << "ic_state = " << ICState2String(ic_state) << "\n";
+ PrintExtraICState(os, kind(), extra_ic_state());
}
- PrintExtraICState(os, kind(), extra_ic_state());
if (is_compare_ic_stub()) {
DCHECK(CodeStub::GetMajorKey(this) == CodeStub::CompareIC);
CompareICStub stub(stub_key(), GetIsolate());
@@ -15470,12 +15408,10 @@ Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
ElementsAccessor* accessor = ElementsAccessor::ForKind(to);
accessor->Add(object, index, value, attributes, new_capacity);
- uint32_t new_length = old_length;
- Handle<Object> new_length_handle;
if (object->IsJSArray() && index >= old_length) {
- new_length = index + 1;
- new_length_handle = isolate->factory()->NewNumberFromUint(new_length);
- JSArray::cast(*object)->set_length(*new_length_handle);
+ Handle<Object> new_length =
+ isolate->factory()->NewNumberFromUint(index + 1);
+ JSArray::cast(*object)->set_length(*new_length);
}
return Just(true);
@@ -15894,12 +15830,22 @@ void Symbol::SymbolShortPrint(std::ostream& os) {
// StringSharedKeys are used as keys in the eval cache.
class StringSharedKey : public HashTableKey {
public:
+ // This tuple unambiguously identifies calls to eval() or
+ // CreateDynamicFunction() (such as through the Function() constructor).
+ // * source is the string passed into eval(). For dynamic functions, this is
+ // the effective source for the function, some of which is implicitly
+ // generated.
+ // * shared is the shared function info for the function containing the call
+  //   to eval(). For dynamic functions, shared is the native context closure.
+ // * When positive, position is the position in the source where eval is
+ // called. When negative, position is the negation of the position in the
+ // dynamic function's effective source where the ')' ends the parameters.
StringSharedKey(Handle<String> source, Handle<SharedFunctionInfo> shared,
- LanguageMode language_mode, int scope_position)
+ LanguageMode language_mode, int position)
: source_(source),
shared_(shared),
language_mode_(language_mode),
- scope_position_(scope_position) {}
+ position_(position) {}
bool IsMatch(Object* other) override {
DisallowHeapAllocation no_allocation;
@@ -15915,8 +15861,8 @@ class StringSharedKey : public HashTableKey {
DCHECK(is_valid_language_mode(language_unchecked));
LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
if (language_mode != language_mode_) return false;
- int scope_position = Smi::cast(other_array->get(3))->value();
- if (scope_position != scope_position_) return false;
+ int position = Smi::cast(other_array->get(3))->value();
+ if (position != position_) return false;
String* source = String::cast(other_array->get(1));
return source->Equals(*source_);
}
@@ -15924,7 +15870,7 @@ class StringSharedKey : public HashTableKey {
static uint32_t StringSharedHashHelper(String* source,
SharedFunctionInfo* shared,
LanguageMode language_mode,
- int scope_position) {
+ int position) {
uint32_t hash = source->Hash();
if (shared->HasSourceCode()) {
// Instead of using the SharedFunctionInfo pointer in the hash
@@ -15936,14 +15882,14 @@ class StringSharedKey : public HashTableKey {
hash ^= String::cast(script->source())->Hash();
STATIC_ASSERT(LANGUAGE_END == 2);
if (is_strict(language_mode)) hash ^= 0x8000;
- hash += scope_position;
+ hash += position;
}
return hash;
}
uint32_t Hash() override {
return StringSharedHashHelper(*source_, *shared_, language_mode_,
- scope_position_);
+ position_);
}
uint32_t HashForObject(Object* obj) override {
@@ -15957,9 +15903,8 @@ class StringSharedKey : public HashTableKey {
int language_unchecked = Smi::cast(other_array->get(2))->value();
DCHECK(is_valid_language_mode(language_unchecked));
LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- int scope_position = Smi::cast(other_array->get(3))->value();
- return StringSharedHashHelper(source, shared, language_mode,
- scope_position);
+ int position = Smi::cast(other_array->get(3))->value();
+ return StringSharedHashHelper(source, shared, language_mode, position);
}
@@ -15968,7 +15913,7 @@ class StringSharedKey : public HashTableKey {
array->set(0, *shared_);
array->set(1, *source_);
array->set(2, Smi::FromInt(language_mode_));
- array->set(3, Smi::FromInt(scope_position_));
+ array->set(3, Smi::FromInt(position_));
return array;
}
@@ -15976,7 +15921,7 @@ class StringSharedKey : public HashTableKey {
Handle<String> source_;
Handle<SharedFunctionInfo> shared_;
LanguageMode language_mode_;
- int scope_position_;
+ int position_;
};
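// Flattened key layout in the cache entry, as built by AsHandle() above:
//   [0] SharedFunctionInfo of the caller (for dynamic functions, the
//       native-context closure)
//   [1] source string
//   [2] Smi: language mode
//   [3] Smi: position (negated for dynamic functions)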
// static
@@ -16263,6 +16208,17 @@ class InternalizedStringKey : public HashTableKey {
DCHECK(string_->IsInternalizedString());
return string_;
}
+ if (FLAG_thin_strings) {
+ // External strings get special treatment, to avoid copying their
+ // contents.
+ if (string_->IsExternalOneByteString()) {
+ return isolate->factory()
+ ->InternalizeExternalString<ExternalOneByteString>(string_);
+ } else if (string_->IsExternalTwoByteString()) {
+ return isolate->factory()
+ ->InternalizeExternalString<ExternalTwoByteString>(string_);
+ }
+ }
// Otherwise allocate a new internalized string.
return isolate->factory()->NewInternalizedStringImpl(
string_, string_->length(), string_->hash_field());
@@ -16272,6 +16228,7 @@ class InternalizedStringKey : public HashTableKey {
return String::cast(obj)->Hash();
}
+ private:
Handle<String> string_;
};
@@ -16852,7 +16809,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
uint32_t limit) {
Isolate* isolate = object->GetIsolate();
- if (object->HasSloppyArgumentsElements()) {
+ if (object->HasSloppyArgumentsElements() || !object->map()->is_extensible()) {
return handle(Smi::FromInt(-1), isolate);
}
@@ -17219,6 +17176,9 @@ MaybeHandle<String> StringTable::InternalizeStringIfExists(
if (string->IsInternalizedString()) {
return string;
}
+ if (string->IsThinString()) {
+ return handle(Handle<ThinString>::cast(string)->actual(), isolate);
+ }
return LookupStringIfExists(isolate, string);
}
@@ -17265,31 +17225,98 @@ void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
isolate->heap()->SetRootStringTable(*table);
}
+namespace {
+
+template <class StringClass>
+void MigrateExternalStringResource(Isolate* isolate, Handle<String> from,
+ Handle<String> to) {
+ Handle<StringClass> cast_from = Handle<StringClass>::cast(from);
+ Handle<StringClass> cast_to = Handle<StringClass>::cast(to);
+ const typename StringClass::Resource* to_resource = cast_to->resource();
+ if (to_resource == nullptr) {
+ // |to| is a just-created internalized copy of |from|. Migrate the resource.
+ cast_to->set_resource(cast_from->resource());
+ // Zap |from|'s resource pointer to reflect the fact that |from| has
+ // relinquished ownership of its resource.
+ cast_from->set_resource(nullptr);
+ } else if (to_resource != cast_from->resource()) {
+ // |to| already existed and has its own resource. Finalize |from|.
+ isolate->heap()->FinalizeExternalString(*from);
+ }
+}
+
+} // namespace
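// Resource-ownership sketch for the helper above: after internalization,
// exactly one live string owns the external resource. Either |to| is the
// fresh internalized copy (the resource pointer moves over and |from|'s is
// zapped), or |to| predates the lookup and keeps its own resource, in which
// case |from|'s resource is finalized immediately instead of waiting for GC.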
Handle<String> StringTable::LookupString(Isolate* isolate,
Handle<String> string) {
+ if (string->IsThinString()) {
+ DCHECK(Handle<ThinString>::cast(string)->actual()->IsInternalizedString());
+ return handle(Handle<ThinString>::cast(string)->actual(), isolate);
+ }
if (string->IsConsString() && string->IsFlat()) {
- string = String::Flatten(string);
+ string = handle(Handle<ConsString>::cast(string)->first(), isolate);
if (string->IsInternalizedString()) return string;
}
InternalizedStringKey key(string);
Handle<String> result = LookupKey(isolate, &key);
- if (string->IsConsString()) {
- Handle<ConsString> cons = Handle<ConsString>::cast(string);
- cons->set_first(*result);
- cons->set_second(isolate->heap()->empty_string());
- } else if (string->IsSlicedString()) {
- STATIC_ASSERT(ConsString::kSize == SlicedString::kSize);
- DisallowHeapAllocation no_gc;
- bool one_byte = result->IsOneByteRepresentation();
- Handle<Map> map = one_byte ? isolate->factory()->cons_one_byte_string_map()
- : isolate->factory()->cons_string_map();
- string->set_map(*map);
- Handle<ConsString> cons = Handle<ConsString>::cast(string);
- cons->set_first(*result);
- cons->set_second(isolate->heap()->empty_string());
+ if (FLAG_thin_strings) {
+ if (string->IsExternalString()) {
+ if (result->IsExternalOneByteString()) {
+ MigrateExternalStringResource<ExternalOneByteString>(isolate, string,
+ result);
+ } else if (result->IsExternalTwoByteString()) {
+ MigrateExternalStringResource<ExternalTwoByteString>(isolate, string,
+ result);
+ } else {
+ // If the external string is duped into an existing non-external
+ // internalized string, free its resource (it's about to be rewritten
+ // into a ThinString below).
+ isolate->heap()->FinalizeExternalString(*string);
+ }
+ }
+
+ // The LookupKey() call above tries to internalize the string in-place.
+ // In cases where that wasn't possible (e.g. new-space strings), turn them
+ // into ThinStrings referring to their internalized versions now.
+ if (!string->IsInternalizedString()) {
+ DisallowHeapAllocation no_gc;
+ bool one_byte = result->IsOneByteRepresentation();
+ Handle<Map> map = one_byte
+ ? isolate->factory()->thin_one_byte_string_map()
+ : isolate->factory()->thin_string_map();
+ int old_size = string->Size();
+ DCHECK(old_size >= ThinString::kSize);
+ string->synchronized_set_map(*map);
+ Handle<ThinString> thin = Handle<ThinString>::cast(string);
+ thin->set_actual(*result);
+ Address thin_end = thin->address() + ThinString::kSize;
+ int size_delta = old_size - ThinString::kSize;
+ if (size_delta != 0) {
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(thin_end, size_delta,
+ ClearRecordedSlots::kNo);
+ heap->AdjustLiveBytes(*thin, -size_delta);
+ }
+ }
+ } else { // !FLAG_thin_strings
+ if (string->IsConsString()) {
+ Handle<ConsString> cons = Handle<ConsString>::cast(string);
+ cons->set_first(*result);
+ cons->set_second(isolate->heap()->empty_string());
+ } else if (string->IsSlicedString()) {
+ STATIC_ASSERT(ConsString::kSize == SlicedString::kSize);
+ DisallowHeapAllocation no_gc;
+ bool one_byte = result->IsOneByteRepresentation();
+ Handle<Map> map = one_byte
+ ? isolate->factory()->cons_one_byte_string_map()
+ : isolate->factory()->cons_string_map();
+ string->set_map(*map);
+ Handle<ConsString> cons = Handle<ConsString>::cast(string);
+ cons->set_first(*result);
+ cons->set_second(isolate->heap()->empty_string());
+ }
}
return result;
}
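// Layout sketch for the in-place ThinString rewrite above (sizes are
// illustrative only): a 32-byte sequential string becomes a ThinString of
// ThinString::kSize bytes whose 'actual' field points at the internalized
// copy, followed by a (32 - ThinString::kSize)-byte filler so that heap
// iteration still sees a contiguous run of valid objects.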
@@ -17377,21 +17404,153 @@ Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
return Handle<Object>(get(index + 1), isolate);
}
+namespace {
-Handle<Object> CompilationCacheTable::LookupEval(
- Handle<String> src, Handle<SharedFunctionInfo> outer_info,
- LanguageMode language_mode, int scope_position) {
- Isolate* isolate = GetIsolate();
- // Cache key is the tuple (source, outer shared function info, scope position)
- // to unambiguously identify the context chain the cached eval code assumes.
- StringSharedKey key(src, outer_info, language_mode, scope_position);
+const int kLiteralEntryLength = 2;
+const int kLiteralInitialLength = 2;
+const int kLiteralContextOffset = 0;
+const int kLiteralLiteralsOffset = 1;
+
+int SearchLiteralsMapEntry(CompilationCacheTable* cache, int cache_entry,
+ Context* native_context) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(native_context->IsNativeContext());
+ Object* obj = cache->get(cache_entry);
+
+ if (obj->IsFixedArray()) {
+ FixedArray* literals_map = FixedArray::cast(obj);
+ int length = literals_map->length();
+ for (int i = 0; i < length; i += kLiteralEntryLength) {
+ if (WeakCell::cast(literals_map->get(i + kLiteralContextOffset))
+ ->value() == native_context) {
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
+void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
+ Handle<Context> native_context, Handle<Cell> literals) {
+ Isolate* isolate = native_context->GetIsolate();
+ DCHECK(native_context->IsNativeContext());
+ STATIC_ASSERT(kLiteralEntryLength == 2);
+ Handle<FixedArray> new_literals_map;
+ int entry;
+
+ Object* obj = cache->get(cache_entry);
+
+ if (!obj->IsFixedArray() || FixedArray::cast(obj)->length() == 0) {
+ new_literals_map =
+ isolate->factory()->NewFixedArray(kLiteralInitialLength, TENURED);
+ entry = 0;
+ } else {
+ Handle<FixedArray> old_literals_map(FixedArray::cast(obj), isolate);
+ entry = SearchLiteralsMapEntry(*cache, cache_entry, *native_context);
+ if (entry >= 0) {
+      // Just set the literals of the entry.
+ Handle<WeakCell> literals_cell =
+ isolate->factory()->NewWeakCell(literals);
+ old_literals_map->set(entry + kLiteralLiteralsOffset, *literals_cell);
+ return;
+ }
+
+ // Can we reuse an entry?
+ DCHECK(entry < 0);
+ int length = old_literals_map->length();
+ for (int i = 0; i < length; i += kLiteralEntryLength) {
+ if (WeakCell::cast(old_literals_map->get(i + kLiteralContextOffset))
+ ->cleared()) {
+ new_literals_map = old_literals_map;
+ entry = i;
+ break;
+ }
+ }
+
+ if (entry < 0) {
+      // Copy the old literals map and append one new entry.
+ new_literals_map = isolate->factory()->CopyFixedArrayAndGrow(
+ old_literals_map, kLiteralEntryLength, TENURED);
+ entry = old_literals_map->length();
+ }
+ }
+
+ Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
+ WeakCell* context_cell = native_context->self_weak_cell();
+
+ new_literals_map->set(entry + kLiteralContextOffset, context_cell);
+ new_literals_map->set(entry + kLiteralLiteralsOffset, *literals_cell);
+
+#ifdef DEBUG
+ for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
+ WeakCell* cell =
+ WeakCell::cast(new_literals_map->get(i + kLiteralContextOffset));
+ DCHECK(cell->cleared() || cell->value()->IsNativeContext());
+ cell = WeakCell::cast(new_literals_map->get(i + kLiteralLiteralsOffset));
+ DCHECK(cell->cleared() || (cell->value()->IsCell()));
+ }
+#endif
+
+ Object* old_literals_map = cache->get(cache_entry);
+ if (old_literals_map != *new_literals_map) {
+ cache->set(cache_entry, *new_literals_map);
+ }
+}
+
+Cell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
+ Context* native_context) {
+ Cell* result = nullptr;
+ int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
+ if (entry >= 0) {
+ FixedArray* literals_map = FixedArray::cast(cache->get(cache_entry));
+ DCHECK_LE(entry + kLiteralEntryLength, literals_map->length());
+ WeakCell* cell =
+ WeakCell::cast(literals_map->get(entry + kLiteralLiteralsOffset));
+
+ result = cell->cleared() ? nullptr : Cell::cast(cell->value());
+ }
+ DCHECK(result == nullptr || result->IsCell());
+ return result;
+}
+
+} // namespace
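// Per-context literals map layout (kLiteralEntryLength == 2), as managed by
// the helpers above:
//   [i + kLiteralContextOffset]  WeakCell -> native context
//   [i + kLiteralLiteralsOffset] WeakCell -> Cell holding the literals
// Entries whose context cell was cleared by GC are reused before the array
// is grown by one entry.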
+
+InfoVectorPair CompilationCacheTable::LookupScript(Handle<String> src,
+ Handle<Context> context,
+ LanguageMode language_mode) {
+ InfoVectorPair empty_result;
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
int entry = FindEntry(&key);
- if (entry == kNotFound) return isolate->factory()->undefined_value();
+ if (entry == kNotFound) return empty_result;
int index = EntryToIndex(entry);
- if (!get(index)->IsFixedArray()) return isolate->factory()->undefined_value();
- return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
+ if (!get(index)->IsFixedArray()) return empty_result;
+ Object* obj = get(index + 1);
+ if (obj->IsSharedFunctionInfo()) {
+ Cell* literals =
+ SearchLiteralsMap(this, index + 2, context->native_context());
+ return InfoVectorPair(SharedFunctionInfo::cast(obj), literals);
+ }
+ return empty_result;
}
+InfoVectorPair CompilationCacheTable::LookupEval(
+ Handle<String> src, Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> native_context, LanguageMode language_mode, int position) {
+ InfoVectorPair empty_result;
+ StringSharedKey key(src, outer_info, language_mode, position);
+ int entry = FindEntry(&key);
+ if (entry == kNotFound) return empty_result;
+ int index = EntryToIndex(entry);
+ if (!get(index)->IsFixedArray()) return empty_result;
+ Object* obj = get(EntryToIndex(entry) + 1);
+ if (obj->IsSharedFunctionInfo()) {
+ Cell* literals =
+ SearchLiteralsMap(this, EntryToIndex(entry) + 2, *native_context);
+ return InfoVectorPair(SharedFunctionInfo::cast(obj), literals);
+ }
+ return empty_result;
+}
Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
JSRegExp::Flags flags) {
@@ -17419,20 +17578,41 @@ Handle<CompilationCacheTable> CompilationCacheTable::Put(
return cache;
}
+Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> context, LanguageMode language_mode,
+ Handle<SharedFunctionInfo> value, Handle<Cell> literals) {
+ Isolate* isolate = cache->GetIsolate();
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ Handle<Context> native_context(context->native_context());
+ StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+ Handle<Object> k = key.AsHandle(isolate);
+ cache = EnsureCapacity(cache, 1, &key);
+ int entry = cache->FindInsertionEntry(key.Hash());
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ AddToLiteralsMap(cache, EntryToIndex(entry) + 2, native_context, literals);
+ cache->ElementAdded();
+ return cache;
+}
Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
- int scope_position) {
+ Handle<Context> native_context, Handle<Cell> literals, int position) {
Isolate* isolate = cache->GetIsolate();
- StringSharedKey key(src, outer_info, value->language_mode(), scope_position);
+ StringSharedKey key(src, outer_info, value->language_mode(), position);
{
Handle<Object> k = key.AsHandle(isolate);
- DisallowHeapAllocation no_allocation_scope;
int entry = cache->FindEntry(&key);
if (entry != kNotFound) {
cache->set(EntryToIndex(entry), *k);
cache->set(EntryToIndex(entry) + 1, *value);
+ // AddToLiteralsMap may allocate a new sub-array to live in the entry,
+ // but it won't change the cache array. Therefore EntryToIndex and
+      // entry remain correct.
+ AddToLiteralsMap(cache, EntryToIndex(entry) + 2, native_context,
+ literals);
return cache;
}
}
@@ -17487,8 +17667,9 @@ void CompilationCacheTable::Age() {
? info->bytecode_array()->IsOld()
: info->code()->kind() != Code::FUNCTION || info->code()->IsOld();
if (is_old) {
- NoWriteBarrierSet(this, entry_index, the_hole_value);
- NoWriteBarrierSet(this, value_index, the_hole_value);
+ for (int i = 0; i < kEntrySize; i++) {
+ NoWriteBarrierSet(this, entry_index + i, the_hole_value);
+ }
ElementRemoved();
}
}
@@ -17503,8 +17684,9 @@ void CompilationCacheTable::Remove(Object* value) {
int entry_index = EntryToIndex(entry);
int value_index = entry_index + 1;
if (get(value_index) == value) {
- NoWriteBarrierSet(this, entry_index, the_hole_value);
- NoWriteBarrierSet(this, value_index, the_hole_value);
+ for (int i = 0; i < kEntrySize; i++) {
+ NoWriteBarrierSet(this, entry_index + i, the_hole_value);
+ }
ElementRemoved();
}
}
@@ -18536,6 +18718,40 @@ bool JSWeakCollection::Delete(Handle<JSWeakCollection> weak_collection,
return was_present;
}
+Handle<JSArray> JSWeakCollection::GetEntries(Handle<JSWeakCollection> holder,
+ int max_entries) {
+ Isolate* isolate = holder->GetIsolate();
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ if (max_entries == 0 || max_entries > table->NumberOfElements()) {
+ max_entries = table->NumberOfElements();
+ }
+ int values_per_entry = holder->IsJSWeakMap() ? 2 : 1;
+ Handle<FixedArray> entries =
+ isolate->factory()->NewFixedArray(max_entries * values_per_entry);
+  // Recompute max_entries because GC could have removed elements from the table.
+ if (max_entries > table->NumberOfElements()) {
+ max_entries = table->NumberOfElements();
+ }
+
+ {
+ DisallowHeapAllocation no_gc;
+ int count = 0;
+ for (int i = 0;
+ count / values_per_entry < max_entries && i < table->Capacity(); i++) {
+ Handle<Object> key(table->KeyAt(i), isolate);
+ if (table->IsKey(isolate, *key)) {
+ entries->set(count++, *key);
+ if (values_per_entry > 1) {
+ Object* value = table->Lookup(key);
+ entries->set(count++, value);
+ }
+ }
+ }
+ DCHECK_EQ(max_entries * values_per_entry, count);
+ }
+ return isolate->factory()->NewJSArrayWithElements(entries);
+}
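// Hypothetical debugger-style caller of GetEntries() (0 means "no limit"):
Handle<JSArray> entries = JSWeakCollection::GetEntries(weak_collection, 0);
// For a JSWeakMap the elements alternate [key0, value0, key1, value1, ...];
// for a JSWeakSet they are simply [key0, key1, ...].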
+
// Check if there is a break point at this source position.
bool DebugInfo::HasBreakPoint(int source_position) {
// Get the break point info object for this code offset.
@@ -18622,11 +18838,8 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
DCHECK(index != kNoBreakPointInfo);
// Allocate new BreakPointInfo object and set the break point.
- Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
- isolate->factory()->NewStruct(BREAK_POINT_INFO_TYPE));
- new_break_point_info->set_source_position(source_position);
- new_break_point_info->set_break_point_objects(
- isolate->heap()->undefined_value());
+ Handle<BreakPointInfo> new_break_point_info =
+ isolate->factory()->NewBreakPointInfo(source_position);
BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
debug_info->break_points()->set(index, *new_break_point_info);
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 075b9a6d72..04d3d384d1 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -81,7 +81,6 @@
// - FixedArray
// - DescriptorArray
// - FrameArray
-// - LiteralsArray
// - HashTable
// - Dictionary
// - StringTable
@@ -109,6 +108,7 @@
// - SeqTwoByteString
// - SlicedString
// - ConsString
+// - ThinString
// - ExternalString
// - ExternalOneByteString
// - ExternalTwoByteString
@@ -122,17 +122,6 @@
// - ExternalTwoByteInternalizedString
// - Symbol
// - HeapNumber
-// - Simd128Value
-// - Float32x4
-// - Int32x4
-// - Uint32x4
-// - Bool32x4
-// - Int16x8
-// - Uint16x8
-// - Bool16x8
-// - Int8x16
-// - Uint8x16
-// - Bool8x16
// - Cell
// - PropertyCell
// - Code
@@ -142,7 +131,6 @@
// - Foreign
// - SharedFunctionInfo
// - Struct
-// - Box
// - AccessorInfo
// - PromiseResolveThenableJobInfo
// - PromiseReactionJobInfo
@@ -188,19 +176,6 @@ enum MutableMode {
};
-enum ExternalArrayType {
- kExternalInt8Array = 1,
- kExternalUint8Array,
- kExternalInt16Array,
- kExternalUint16Array,
- kExternalInt32Array,
- kExternalUint32Array,
- kExternalFloat32Array,
- kExternalFloat64Array,
- kExternalUint8ClampedArray,
-};
-
-
static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) {
return store_mode == STORE_TRANSITION_TO_OBJECT ||
store_mode == STORE_TRANSITION_TO_DOUBLE ||
@@ -330,10 +305,12 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(CONS_STRING_TYPE) \
V(EXTERNAL_STRING_TYPE) \
V(SLICED_STRING_TYPE) \
+ V(THIN_STRING_TYPE) \
V(ONE_BYTE_STRING_TYPE) \
V(CONS_ONE_BYTE_STRING_TYPE) \
V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
V(SLICED_ONE_BYTE_STRING_TYPE) \
+ V(THIN_ONE_BYTE_STRING_TYPE) \
V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
V(SHORT_EXTERNAL_STRING_TYPE) \
V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE) \
@@ -341,7 +318,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
\
V(SYMBOL_TYPE) \
V(HEAP_NUMBER_TYPE) \
- V(SIMD128_VALUE_TYPE) \
V(ODDBALL_TYPE) \
\
V(MAP_TYPE) \
@@ -377,7 +353,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(SCRIPT_TYPE) \
V(TYPE_FEEDBACK_INFO_TYPE) \
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
- V(BOX_TYPE) \
V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
V(PROMISE_REACTION_JOB_INFO_TYPE) \
V(DEBUG_INFO_TYPE) \
@@ -423,6 +398,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_PROMISE_TYPE) \
V(JS_REGEXP_TYPE) \
V(JS_ERROR_TYPE) \
+ V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
V(JS_STRING_ITERATOR_TYPE) \
\
V(JS_TYPED_ARRAY_KEY_ITERATOR_TYPE) \
@@ -517,7 +493,10 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
ExternalTwoByteString::kShortSize, \
short_external_internalized_string_with_one_byte_data, \
- ShortExternalInternalizedStringWithOneByteData)
+ ShortExternalInternalizedStringWithOneByteData) \
+ V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
+ V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
+ ThinOneByteString)
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
@@ -541,7 +520,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(SCRIPT, Script, script) \
V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \
V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
- V(BOX, Box, box) \
V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
promise_resolve_thenable_job_info) \
V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
@@ -569,20 +547,21 @@ const uint32_t kIsNotInternalizedMask = 0x40;
const uint32_t kNotInternalizedTag = 0x40;
const uint32_t kInternalizedTag = 0x0;
-// If bit 7 is clear then bit 2 indicates whether the string consists of
+// If bit 7 is clear, then bit 3 indicates whether the string consists of
// two-byte characters or one-byte characters.
-const uint32_t kStringEncodingMask = 0x4;
+const uint32_t kStringEncodingMask = 0x8;
const uint32_t kTwoByteStringTag = 0x0;
-const uint32_t kOneByteStringTag = 0x4;
+const uint32_t kOneByteStringTag = 0x8;
-// If bit 7 is clear, the low-order 2 bits indicate the representation
+// If bit 7 is clear, the low-order 3 bits indicate the representation
// of the string.
-const uint32_t kStringRepresentationMask = 0x03;
+const uint32_t kStringRepresentationMask = 0x07;
enum StringRepresentationTag {
kSeqStringTag = 0x0,
kConsStringTag = 0x1,
kExternalStringTag = 0x2,
- kSlicedStringTag = 0x3
+ kSlicedStringTag = 0x3,
+ kThinStringTag = 0x5
};
const uint32_t kIsIndirectStringMask = 0x1;
const uint32_t kIsIndirectStringTag = 0x1;
@@ -592,21 +571,17 @@ STATIC_ASSERT((kConsStringTag &
kIsIndirectStringMask) == kIsIndirectStringTag); // NOLINT
STATIC_ASSERT((kSlicedStringTag &
kIsIndirectStringMask) == kIsIndirectStringTag); // NOLINT
+STATIC_ASSERT((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
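A worked decode of the widened tag layout, using the constants above (this is
exactly THIN_ONE_BYTE_STRING_TYPE as defined further down in this diff):

  uint32_t t = kOneByteStringTag | kThinStringTag | kNotInternalizedTag;  // 0x4d
  bool one_byte = (t & kStringEncodingMask) == kOneByteStringTag;         // true
  bool thin = (t & kStringRepresentationMask) == kThinStringTag;          // true
  bool indirect = (t & kIsIndirectStringMask) == kIsIndirectStringTag;    // true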
-// Use this mask to distinguish between cons and slice only after making
-// sure that the string is one of the two (an indirect string).
-const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag;
-STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask));
-
-// If bit 7 is clear, then bit 3 indicates whether this two-byte
+// If bit 7 is clear, then bit 4 indicates whether this two-byte
// string actually contains one byte data.
-const uint32_t kOneByteDataHintMask = 0x08;
-const uint32_t kOneByteDataHintTag = 0x08;
+const uint32_t kOneByteDataHintMask = 0x10;
+const uint32_t kOneByteDataHintTag = 0x10;
// If bit 7 is clear and string representation indicates an external string,
-// then bit 4 indicates whether the data pointer is cached.
-const uint32_t kShortExternalStringMask = 0x10;
-const uint32_t kShortExternalStringTag = 0x10;
+// then bit 5 indicates whether the data pointer is cached.
+const uint32_t kShortExternalStringMask = 0x20;
+const uint32_t kShortExternalStringTag = 0x20;
// A ConsString with an empty string as the right side is a candidate
// for being shortcut by the garbage collector. We don't allocate any
@@ -670,13 +645,15 @@ enum InstanceType {
SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
kNotInternalizedTag,
+ THIN_STRING_TYPE = kTwoByteStringTag | kThinStringTag | kNotInternalizedTag,
+ THIN_ONE_BYTE_STRING_TYPE =
+ kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
// Non-string names
SYMBOL_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
// Other primitives (cannot contain non-map-word pointers to heap objects).
HEAP_NUMBER_TYPE,
- SIMD128_VALUE_TYPE,
ODDBALL_TYPE, // LAST_PRIMITIVE_TYPE
// Objects allocated in their own spaces (never in new space).
@@ -715,7 +692,6 @@ enum InstanceType {
SCRIPT_TYPE,
TYPE_FEEDBACK_INFO_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
- BOX_TYPE,
PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
PROMISE_REACTION_JOB_INFO_TYPE,
DEBUG_INFO_TYPE,
@@ -768,6 +744,7 @@ enum InstanceType {
JS_PROMISE_TYPE,
JS_REGEXP_TYPE,
JS_ERROR_TYPE,
+ JS_ASYNC_FROM_SYNC_ITERATOR_TYPE,
JS_STRING_ITERATOR_TYPE,
JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
@@ -891,7 +868,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(HANDLER_TABLE_SUB_TYPE) \
V(JS_COLLECTION_SUB_TYPE) \
V(JS_WEAK_COLLECTION_SUB_TYPE) \
- V(LITERALS_ARRAY_SUB_TYPE) \
V(MAP_CODE_CACHE_SUB_TYPE) \
V(NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE) \
V(NUMBER_STRING_CACHE_SUB_TYPE) \
@@ -950,12 +926,12 @@ class AllocationSiteUsageContext;
class Cell;
class ConsString;
class ElementsAccessor;
+class FindAndReplacePattern;
class FixedArrayBase;
class FunctionLiteral;
class JSGlobalObject;
class KeyAccumulator;
class LayoutDescriptor;
-class LiteralsArray;
class LookupIterator;
class FieldType;
class Module;
@@ -995,17 +971,6 @@ template <class C> inline bool Is(Object* obj);
#define HEAP_OBJECT_TYPE_LIST(V) \
V(HeapNumber) \
V(MutableHeapNumber) \
- V(Simd128Value) \
- V(Float32x4) \
- V(Int32x4) \
- V(Uint32x4) \
- V(Bool32x4) \
- V(Int16x8) \
- V(Uint16x8) \
- V(Bool16x8) \
- V(Int8x16) \
- V(Uint8x16) \
- V(Bool8x16) \
V(Name) \
V(UniqueName) \
V(String) \
@@ -1018,6 +983,7 @@ template <class C> inline bool Is(Object* obj);
V(SeqTwoByteString) \
V(SeqOneByteString) \
V(InternalizedString) \
+ V(ThinString) \
V(Symbol) \
\
V(FixedTypedArrayBase) \
@@ -1043,7 +1009,6 @@ template <class C> inline bool Is(Object* obj);
V(DescriptorArray) \
V(FrameArray) \
V(TransitionArray) \
- V(LiteralsArray) \
V(FeedbackMetadata) \
V(FeedbackVector) \
V(DeoptimizationInputData) \
@@ -1051,6 +1016,7 @@ template <class C> inline bool Is(Object* obj);
V(DependentCode) \
V(HandlerTable) \
V(FixedArray) \
+ V(BoilerplateDescription) \
V(FixedDoubleArray) \
V(WeakFixedArray) \
V(ArrayList) \
@@ -1075,6 +1041,7 @@ template <class C> inline bool Is(Object* obj);
V(JSArray) \
V(JSArrayBuffer) \
V(JSArrayBufferView) \
+ V(JSAsyncFromSyncIterator) \
V(JSCollection) \
V(JSTypedArray) \
V(JSArrayIterator) \
@@ -1828,6 +1795,9 @@ class HeapNumber: public HeapObject {
inline double value() const;
inline void set_value(double value);
+ inline uint64_t value_as_bits() const;
+ inline void set_value_as_bits(uint64_t bits);
+
DECLARE_CAST(HeapNumber)
// Dispatched behavior.
@@ -1872,71 +1842,6 @@ class HeapNumber: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
};
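The raw-bits accessors pair with JSObject::RawFastDoublePropertyAsBitsAt later
in this diff; a plausible motivation is that copying doubles as uint64_t
preserves the exact bit pattern (including NaN payloads) that a load/store
through double could normalize. A sketch, assuming |src| and |dst| are
HeapNumber pointers:

  uint64_t bits = src->value_as_bits();
  dst->set_value_as_bits(bits);            // bit-exact, even for NaNs
  DCHECK_EQ(bits, dst->value_as_bits());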
-
-// The Simd128Value class describes heap allocated 128 bit SIMD values.
-class Simd128Value : public HeapObject {
- public:
- DECLARE_CAST(Simd128Value)
-
- DECLARE_PRINTER(Simd128Value)
- DECLARE_VERIFIER(Simd128Value)
-
- static Handle<String> ToString(Handle<Simd128Value> input);
-
- // Equality operations.
- inline bool Equals(Simd128Value* that);
- static inline bool Equals(Handle<Simd128Value> one, Handle<Simd128Value> two);
-
- // Checks that another instance is bit-wise equal.
- bool BitwiseEquals(const Simd128Value* other) const;
- // Computes a hash from the 128 bit value, viewed as 4 32-bit integers.
- uint32_t Hash() const;
- // Copies the 16 bytes of SIMD data to the destination address.
- void CopyBits(void* destination) const;
-
- // Layout description.
- static const int kValueOffset = HeapObject::kHeaderSize;
- static const int kSize = kValueOffset + kSimd128Size;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Simd128Value);
-};
-
-
-// V has parameters (TYPE, Type, type, lane count, lane type)
-#define SIMD128_TYPES(V) \
- V(FLOAT32X4, Float32x4, float32x4, 4, float) \
- V(INT32X4, Int32x4, int32x4, 4, int32_t) \
- V(UINT32X4, Uint32x4, uint32x4, 4, uint32_t) \
- V(BOOL32X4, Bool32x4, bool32x4, 4, bool) \
- V(INT16X8, Int16x8, int16x8, 8, int16_t) \
- V(UINT16X8, Uint16x8, uint16x8, 8, uint16_t) \
- V(BOOL16X8, Bool16x8, bool16x8, 8, bool) \
- V(INT8X16, Int8x16, int8x16, 16, int8_t) \
- V(UINT8X16, Uint8x16, uint8x16, 16, uint8_t) \
- V(BOOL8X16, Bool8x16, bool8x16, 16, bool)
-
-#define SIMD128_VALUE_CLASS(TYPE, Type, type, lane_count, lane_type) \
- class Type final : public Simd128Value { \
- public: \
- inline lane_type get_lane(int lane) const; \
- inline void set_lane(int lane, lane_type value); \
- \
- DECLARE_CAST(Type) \
- \
- DECLARE_PRINTER(Type) \
- \
- static Handle<String> ToString(Handle<Type> input); \
- \
- inline bool Equals(Type* that); \
- \
- private: \
- DISALLOW_IMPLICIT_CONSTRUCTORS(Type); \
- };
-SIMD128_TYPES(SIMD128_VALUE_CLASS)
-#undef SIMD128_VALUE_CLASS
-
-
enum EnsureElementsMode {
DONT_ALLOW_DOUBLE_ELEMENTS,
ALLOW_COPIED_DOUBLE_ELEMENTS,
@@ -1997,11 +1902,14 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static Maybe<bool> HasInPrototypeChain(
Isolate* isolate, Handle<JSReceiver> object, Handle<Object> proto);
- // Reads all enumerable own properties of source and adds them to target,
- // using either Set or CreateDataProperty depending on the use_set argument.
+ // Reads all enumerable own properties of source and adds them to
+ // target, using either Set or CreateDataProperty depending on the
+ // use_set argument. This only copies values not present in the
+ // maybe_excluded_properties list.
MUST_USE_RESULT static Maybe<bool> SetOrCopyDataProperties(
Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
- bool use_set);
+ const ScopedVector<Handle<Object>>* excluded_properties = nullptr,
+ bool use_set = true);
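The new excluded_properties parameter plausibly serves object rest
destructuring, where listed keys must be skipped (an assumption; the concrete
caller is not shown in this hunk):

  // JS:  const { a, ...rest } = source;
  // copies every enumerable own property of |source| except "a", i.e. the
  // caller would pass excluded_properties containing a handle for "a".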
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
MUST_USE_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
@@ -2101,7 +2009,7 @@ class JSReceiver: public HeapObject {
// function that was used to instantiate the object).
static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
- Context* GetCreationContext();
+ Handle<Context> GetCreationContext();
MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetPropertyAttributes(
Handle<JSReceiver> object, Handle<Name> name);
@@ -2497,13 +2405,13 @@ class JSObject: public JSReceiver {
FieldIndex index);
inline Object* RawFastPropertyAt(FieldIndex index);
inline double RawFastDoublePropertyAt(FieldIndex index);
+ inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index);
inline void FastPropertyAtPut(FieldIndex index, Object* value);
inline void RawFastPropertyAtPut(FieldIndex index, Object* value);
- inline void RawFastDoublePropertyAtPut(FieldIndex index, double value);
+ inline void RawFastDoublePropertyAsBitsAtPut(FieldIndex index, uint64_t bits);
inline void WriteToField(int descriptor, PropertyDetails details,
Object* value);
- inline void WriteToField(int descriptor, Object* value);
// Access to in object properties.
inline int GetInObjectPropertyOffset(int index);
@@ -2863,10 +2771,12 @@ class FixedArray: public FixedArrayBase {
void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
// Garbage collection support.
- static int SizeFor(int length) { return kHeaderSize + length * kPointerSize; }
+ static constexpr int SizeFor(int length) {
+ return kHeaderSize + length * kPointerSize;
+ }
// Code Generation support.
- static int OffsetOfElementAt(int index) { return SizeFor(index); }
+ static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
// Garbage collection support.
inline Object** RawFieldOfElementAt(int index);
@@ -2903,7 +2813,6 @@ class FixedArray: public FixedArrayBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
};
-
// FixedDoubleArray describes fixed-sized arrays with element type double.
class FixedDoubleArray: public FixedArrayBase {
public:
@@ -3031,6 +2940,7 @@ class ArrayList : public FixedArray {
AddMode mode = kNone);
static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj1,
Handle<Object> obj2, AddMode = kNone);
+ static Handle<ArrayList> New(Isolate* isolate, int size);
inline int Length();
inline void SetLength(int length);
inline Object* Get(int index);
@@ -3048,57 +2958,6 @@ class ArrayList : public FixedArray {
DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
};
-// The property RegExpMatchInfo includes the matchIndices
-// array of the last successful regexp match (an array of start/end index
-// pairs for the match and all the captured substrings), the invariant is
-// that there are at least two capture indices. The array also contains
-// the subject string for the last successful match.
-// After creation the result must be treated as a FixedArray in all regards.
-class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
- public:
- // Returns the number of captures, which is defined as the length of the
- // matchIndices objects of the last match. matchIndices contains two indices
- // for each capture (including the match itself), i.e. 2 * #captures + 2.
- inline int NumberOfCaptureRegisters();
- inline void SetNumberOfCaptureRegisters(int value);
-
- // Returns the subject string of the last match.
- inline String* LastSubject();
- inline void SetLastSubject(String* value);
-
- // Like LastSubject, but modifiable by the user.
- inline Object* LastInput();
- inline void SetLastInput(Object* value);
-
- // Returns the i'th capture index, 0 <= i < NumberOfCaptures(). Capture(0) and
- // Capture(1) determine the start- and endpoint of the match itself.
- inline int Capture(int i);
- inline void SetCapture(int i, int value);
-
- // Reserves space for captures.
- static Handle<RegExpMatchInfo> ReserveCaptures(
- Handle<RegExpMatchInfo> match_info, int capture_count);
-
- DECLARE_CAST(RegExpMatchInfo)
-
- static const int kNumberOfCapturesIndex = 0;
- static const int kLastSubjectIndex = 1;
- static const int kLastInputIndex = 2;
- static const int kFirstCaptureIndex = 3;
- static const int kLastMatchOverhead = kFirstCaptureIndex;
-
- static const int kNumberOfCapturesOffset = FixedArray::kHeaderSize;
- static const int kLastSubjectOffset = kNumberOfCapturesOffset + kPointerSize;
- static const int kLastInputOffset = kLastSubjectOffset + kPointerSize;
- static const int kFirstCaptureOffset = kLastInputOffset + kPointerSize;
-
- // Every match info is guaranteed to have enough space to store two captures.
- static const int kInitialCaptureIndices = 2;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMatchInfo);
-};
-
#define FRAME_ARRAY_FIELD_LIST(V) \
V(WasmInstance, Object) \
V(WasmFunctionIndex, Smi) \
@@ -3186,7 +3045,7 @@ class FrameArray : public FixedArray {
// [0]: pointer to fixed array with enum cache
// [1]: either Smi(0) or pointer to fixed array with indices
// [2]: first key
-// [2 + number of descriptors * kDescriptorSize]: start of slack
+// [2 + number of descriptors * kEntrySize]: start of slack
class DescriptorArray: public FixedArray {
public:
// Returns true for both shared empty_descriptor_array and for smis, which the
@@ -3248,7 +3107,8 @@ class DescriptorArray: public FixedArray {
PropertyDetails details);
void Replace(int descriptor_number, Descriptor* descriptor);
- // Generalizes representation and field type of all field descriptors.
+ // Generalizes constness, representation and field type of all field
+ // descriptors.
void GeneralizeAllFields();
// Append automatically sets the enumeration index. This should only be used
@@ -3307,10 +3167,11 @@ class DescriptorArray: public FixedArray {
static const int kEnumCacheBridgeCacheOffset = FixedArray::kHeaderSize;
// Layout of descriptor.
- static const int kDescriptorKey = 0;
- static const int kDescriptorDetails = 1;
- static const int kDescriptorValue = 2;
- static const int kDescriptorSize = 3;
+ // Naming is consistent with Dictionary classes for easy templating.
+ static const int kEntryKeyIndex = 0;
+ static const int kEntryDetailsIndex = 1;
+ static const int kEntryValueIndex = 2;
+ static const int kEntrySize = 3;
#if defined(DEBUG) || defined(OBJECT_PRINT)
// For our gdb macros, we should perhaps change these in the future.
@@ -3341,18 +3202,16 @@ class DescriptorArray: public FixedArray {
}
static int ToDetailsIndex(int descriptor_number) {
- return kFirstIndex + (descriptor_number * kDescriptorSize) +
- kDescriptorDetails;
+ return kFirstIndex + (descriptor_number * kEntrySize) + kEntryDetailsIndex;
}
// Conversion from descriptor number to array indices.
static int ToKeyIndex(int descriptor_number) {
- return kFirstIndex + (descriptor_number * kDescriptorSize) + kDescriptorKey;
+ return kFirstIndex + (descriptor_number * kEntrySize) + kEntryKeyIndex;
}
static int ToValueIndex(int descriptor_number) {
- return kFirstIndex + (descriptor_number * kDescriptorSize) +
- kDescriptorValue;
+ return kFirstIndex + (descriptor_number * kEntrySize) + kEntryValueIndex;
}
private:
@@ -3499,10 +3358,22 @@ class HashTable : public HashTableBase {
public:
typedef Shape ShapeT;
- // Wrapper methods. Defined in src/objects-inl.h
- // to break a cycle with src/heap/heap.h.
- inline uint32_t Hash(Key key);
- inline uint32_t HashForObject(Key key, Object* object);
+ // Wrapper methods
+ inline uint32_t Hash(Key key) {
+ if (Shape::UsesSeed) {
+ return Shape::SeededHash(key, GetHeap()->HashSeed());
+ } else {
+ return Shape::Hash(key);
+ }
+ }
+
+ inline uint32_t HashForObject(Key key, Object* object) {
+ if (Shape::UsesSeed) {
+ return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
+ } else {
+ return Shape::HashForObject(key, object);
+ }
+ }
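A hypothetical shape illustrating the static interface the wrappers above
compile against (kPrefixSize/kEntrySize follow the convention visible in
CompilationCacheShape later in this diff; the hash values are illustrative):

  struct ExampleShape : public BaseShape<Object*> {
    static const bool UsesSeed = false;  // selects the unseeded branch above
    static const int kPrefixSize = 0;
    static const int kEntrySize = 2;
    static uint32_t Hash(Object* key) { return 7; }
    static uint32_t HashForObject(Object* key, Object* object) { return 7; }
  };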
// Returns a new HashTable object.
MUST_USE_RESULT static Handle<Derived> New(
@@ -4263,22 +4134,23 @@ class OrderedHashTable: public FixedArray {
static const int kNotFound = -1;
static const int kMinCapacity = 4;
- static const int kNumberOfBucketsIndex = 0;
- static const int kNumberOfElementsIndex = kNumberOfBucketsIndex + 1;
- static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1;
- static const int kHashTableStartIndex = kNumberOfDeletedElementsIndex + 1;
+ static const int kNumberOfElementsIndex = 0;
+ // The next table is stored at the same index as the number of elements.
static const int kNextTableIndex = kNumberOfElementsIndex;
-
- static const int kNumberOfBucketsOffset =
- kHeaderSize + kNumberOfBucketsIndex * kPointerSize;
- static const int kNumberOfElementsOffset =
- kHeaderSize + kNumberOfElementsIndex * kPointerSize;
- static const int kNumberOfDeletedElementsOffset =
- kHeaderSize + kNumberOfDeletedElementsIndex * kPointerSize;
- static const int kHashTableStartOffset =
- kHeaderSize + kHashTableStartIndex * kPointerSize;
- static const int kNextTableOffset =
- kHeaderSize + kNextTableIndex * kPointerSize;
+ static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1;
+ static const int kNumberOfBucketsIndex = kNumberOfDeletedElementsIndex + 1;
+ static const int kHashTableStartIndex = kNumberOfBucketsIndex + 1;
+
+ static constexpr const int kNumberOfElementsOffset =
+ FixedArray::OffsetOfElementAt(kNumberOfElementsIndex);
+ static constexpr const int kNextTableOffset =
+ FixedArray::OffsetOfElementAt(kNextTableIndex);
+ static constexpr const int kNumberOfDeletedElementsOffset =
+ FixedArray::OffsetOfElementAt(kNumberOfDeletedElementsIndex);
+ static constexpr const int kNumberOfBucketsOffset =
+ FixedArray::OffsetOfElementAt(kNumberOfBucketsIndex);
+ static constexpr const int kHashTableStartOffset =
+ FixedArray::OffsetOfElementAt(kHashTableStartIndex);
static const int kEntrySize = entrysize + 1;
static const int kChainOffset = entrysize;
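Because FixedArray::OffsetOfElementAt is now constexpr (see the FixedArray
hunk above), the offsets here are compile-time constants and layout facts can
be asserted statically (illustrative):

  static_assert(FixedArray::OffsetOfElementAt(1) ==
                    FixedArray::kHeaderSize + kPointerSize,
                "one pointer-sized slot per element, relative to the header");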
@@ -4965,42 +4837,6 @@ class DeoptimizationOutputData: public FixedArray {
#endif
};
-
-// A literals array contains the literals for a JSFunction. It also holds
-// the type feedback vector.
-class LiteralsArray : public FixedArray {
- public:
- static const int kVectorIndex = 0;
- static const int kFirstLiteralIndex = 1;
- V8_EXPORT_PRIVATE static const int kFeedbackVectorOffset;
- static const int kOffsetToFirstLiteral;
-
- static int OffsetOfLiteralAt(int index) {
- return OffsetOfElementAt(index + kFirstLiteralIndex);
- }
-
- inline FeedbackVector* feedback_vector() const;
- inline void set_feedback_vector(FeedbackVector* vector);
- inline Object* literal(int literal_index) const;
- inline void set_literal(int literal_index, Object* literal);
- inline void set_literal_undefined(int literal_index);
- inline int literals_count() const;
-
- static Handle<LiteralsArray> New(Isolate* isolate,
- Handle<FeedbackVector> vector,
- int number_of_literals,
- PretenureFlag pretenure = TENURED);
-
- DECLARE_CAST(LiteralsArray)
-
- private:
- inline Object* get(int index) const;
- inline void set(int index, Object* value);
- inline void set(int index, Smi* value);
- inline void set(int index, Object* value, WriteBarrierMode mode);
-};
-
-
class TemplateList : public FixedArray {
public:
static Handle<TemplateList> New(Isolate* isolate, int size);
@@ -5040,7 +4876,6 @@ class Code: public HeapObject {
V(LOAD_IC) \
V(LOAD_GLOBAL_IC) \
V(KEYED_LOAD_IC) \
- V(CALL_IC) \
V(STORE_IC) \
V(KEYED_STORE_IC) \
V(BINARY_OP_IC) \
@@ -5091,10 +4926,6 @@ class Code: public HeapObject {
// [source_position_table]: ByteArray for the source positions table.
DECL_ACCESSORS(source_position_table, ByteArray)
- // [protected_instructions]: Fixed array containing protected instruction and
- // corresponding landing pad offsets.
- DECL_ACCESSORS(protected_instructions, FixedArray)
-
// [raw_type_feedback_info]: This field stores various things, depending on
// the kind of the code object.
// FUNCTION => type feedback information.
@@ -5147,7 +4978,7 @@ class Code: public HeapObject {
inline bool is_inline_cache_stub();
inline bool is_debug_stub();
inline bool is_handler();
- inline bool is_call_stub();
+ inline bool is_stub();
inline bool is_binary_op_stub();
inline bool is_compare_ic_stub();
inline bool is_to_boolean_ic_stub();
@@ -5269,7 +5100,6 @@ class Code: public HeapObject {
// Find the first map in an IC stub.
Map* FindFirstMap();
- class FindAndReplacePattern;
// For each (map-to-find, object-to-replace) pair in the pattern, this
// function replaces the corresponding placeholder in the code with the
// object-to-replace. The function assumes that pairs in the pattern come in
@@ -5471,10 +5301,7 @@ class Code: public HeapObject {
// For FUNCTION kind, we store the type feedback info here.
static const int kTypeFeedbackInfoOffset =
kSourcePositionTableOffset + kPointerSize;
- static const int kProtectedInstructionOffset =
- kTypeFeedbackInfoOffset + kPointerSize;
- static const int kNextCodeLinkOffset =
- kProtectedInstructionOffset + kPointerSize;
+ static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
static const int kGCMetadataOffset = kNextCodeLinkOffset + kPointerSize;
static const int kInstructionSizeOffset = kGCMetadataOffset + kPointerSize;
static const int kICAgeOffset = kInstructionSizeOffset + kIntSize;
@@ -6021,6 +5848,16 @@ class Map: public HeapObject {
int NumberOfFields();
+ // Returns true if transition to the given map requires special
+ // synchronization with the concurrent marker.
+ bool TransitionRequiresSynchronizationWithGC(Map* target);
+ // Returns true if transition to the given map removes a tagged in-object
+ // field.
+ bool TransitionRemovesTaggedField(Map* target);
+ // Returns true if transition to the given map replaces a tagged in-object
+ // field with an untagged in-object field.
+ bool TransitionChangesTaggedFieldToUntaggedField(Map* target);
+
// TODO(ishell): candidate with JSObject::MigrateToMap().
bool InstancesNeedRewriting(Map* target);
bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
@@ -6032,6 +5869,7 @@ class Map: public HeapObject {
Representation rep1, Handle<FieldType> type1, Representation rep2,
Handle<FieldType> type2, Isolate* isolate);
static void GeneralizeField(Handle<Map> map, int modify_index,
+ PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type);
@@ -6046,6 +5884,7 @@ class Map: public HeapObject {
static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
int descriptor_number,
+ PropertyConstness constness,
Handle<Object> value);
static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
@@ -6170,8 +6009,8 @@ class Map: public HeapObject {
MUST_USE_RESULT static MaybeHandle<Map> CopyWithField(
Handle<Map> map, Handle<Name> name, Handle<FieldType> type,
- PropertyAttributes attributes, Representation representation,
- TransitionFlag flag);
+ PropertyAttributes attributes, PropertyConstness constness,
+ Representation representation, TransitionFlag flag);
MUST_USE_RESULT static MaybeHandle<Map> CopyWithConstant(
Handle<Map> map,
@@ -6212,6 +6051,7 @@ class Map: public HeapObject {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
+ PropertyConstness constness,
StoreFromKeyed store_mode);
static Handle<Map> TransitionToAccessorProperty(
Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor,
@@ -6379,7 +6219,6 @@ class Map: public HeapObject {
static const int kInstanceTypeAndBitFieldOffset =
kInstanceAttributesOffset + 0;
static const int kBitField2Offset = kInstanceAttributesOffset + 2;
- static const int kUnusedPropertyFieldsByte = 3;
static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 3;
STATIC_ASSERT(kInstanceTypeAndBitFieldOffset ==
@@ -6504,6 +6343,7 @@ class Map: public HeapObject {
// type. The type must be prepared for storing in descriptor array:
// it must be either a simple type or a map wrapped in a weak cell.
void UpdateFieldType(int descriptor_number, Handle<Name> name,
+ PropertyConstness new_constness,
Representation new_representation,
Handle<Object> new_wrapped_type);
@@ -6545,16 +6385,13 @@ class PromiseResolveThenableJobInfo : public Struct {
DECL_ACCESSORS(resolve, JSFunction)
DECL_ACCESSORS(reject, JSFunction)
- DECL_INT_ACCESSORS(debug_id)
-
DECL_ACCESSORS(context, Context)
static const int kThenableOffset = Struct::kHeaderSize;
static const int kThenOffset = kThenableOffset + kPointerSize;
static const int kResolveOffset = kThenOffset + kPointerSize;
static const int kRejectOffset = kResolveOffset + kPointerSize;
- static const int kDebugIdOffset = kRejectOffset + kPointerSize;
- static const int kContextOffset = kDebugIdOffset + kPointerSize;
+ static const int kContextOffset = kRejectOffset + kPointerSize;
static const int kSize = kContextOffset + kPointerSize;
DECLARE_CAST(PromiseResolveThenableJobInfo)
@@ -6590,8 +6427,7 @@ class PromiseReactionJobInfo : public Struct {
kDeferredPromiseOffset + kPointerSize;
static const int kDeferredOnRejectOffset =
kDeferredOnResolveOffset + kPointerSize;
- static const int kDebugIdOffset = kDeferredOnRejectOffset + kPointerSize;
- static const int kContextOffset = kDebugIdOffset + kPointerSize;
+ static const int kContextOffset = kDeferredOnRejectOffset + kPointerSize;
static const int kSize = kContextOffset + kPointerSize;
DECLARE_CAST(PromiseReactionJobInfo)
@@ -6602,26 +6438,6 @@ class PromiseReactionJobInfo : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReactionJobInfo);
};
-// A simple one-element struct, useful where smis need to be boxed.
-class Box : public Struct {
- public:
- // [value]: the boxed contents.
- DECL_ACCESSORS(value, Object)
-
- DECLARE_CAST(Box)
-
- // Dispatched behavior.
- DECLARE_PRINTER(Box)
- DECLARE_VERIFIER(Box)
-
- static const int kValueOffset = HeapObject::kHeaderSize;
- static const int kSize = kValueOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Box);
-};
-
-
// Container for metadata stored on each prototype map.
class PrototypeInfo : public Struct {
public:
@@ -6742,28 +6558,6 @@ class ContextExtension : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(ContextExtension);
};
-// Pair of {ElementsKind} and an array of constant values for {ArrayLiteral}
-// expressions. Used to communicate with the runtime for literal boilerplate
-// creation within the {Runtime_CreateArrayLiteral} method.
-class ConstantElementsPair : public Struct {
- public:
- DECL_INT_ACCESSORS(elements_kind)
- DECL_ACCESSORS(constant_values, FixedArrayBase)
-
- DECLARE_CAST(ConstantElementsPair)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ConstantElementsPair)
- DECLARE_VERIFIER(ConstantElementsPair)
-
- static const int kElementsKindOffset = HeapObject::kHeaderSize;
- static const int kConstantValuesOffset = kElementsKindOffset + kPointerSize;
- static const int kSize = kConstantValuesOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantElementsPair);
-};
-
// Script describes a script which has been added to the VM.
class Script: public Struct {
public:
@@ -6912,8 +6706,8 @@ class Script: public Struct {
// Look through the list of existing shared function infos to find one
// that matches the function literal. Return empty handle if not found.
- MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(Isolate* isolate,
- FunctionLiteral* fun);
+ MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(
+ Isolate* isolate, const FunctionLiteral* fun);
// Iterate over all script objects on the heap.
class Iterator {
@@ -6954,7 +6748,7 @@ class Script: public Struct {
static const int kCompilationTypeBit = 0;
static const int kCompilationStateBit = 1;
static const int kOriginOptionsShift = 2;
- static const int kOriginOptionsSize = 3;
+ static const int kOriginOptionsSize = 4;
static const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
<< kOriginOptionsShift;
@@ -6972,6 +6766,7 @@ class Script: public Struct {
// Installation of ids for the selected builtin functions is handled
// by the bootstrapper.
#define FUNCTIONS_WITH_ID_LIST(V) \
+ V(Array, isArray, ArrayIsArray) \
V(Array.prototype, concat, ArrayConcat) \
V(Array.prototype, every, ArrayEvery) \
V(Array.prototype, fill, ArrayFill) \
@@ -7003,7 +6798,10 @@ class Script: public Struct {
V(Date.prototype, getTime, DateGetTime) \
V(Function.prototype, apply, FunctionApply) \
V(Function.prototype, call, FunctionCall) \
+ V(Object, assign, ObjectAssign) \
+ V(Object, create, ObjectCreate) \
V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
+ V(Object.prototype, toString, ObjectToString) \
V(RegExp.prototype, compile, RegExpCompile) \
V(RegExp.prototype, exec, RegExpExec) \
V(RegExp.prototype, test, RegExpTest) \
@@ -7072,7 +6870,28 @@ class Script: public Struct {
V(Number, parseFloat, NumberParseFloat) \
V(Number, parseInt, NumberParseInt) \
V(Number.prototype, toString, NumberToString) \
- V(Object, create, ObjectCreate)
+ V(Map.prototype, clear, MapClear) \
+ V(Map.prototype, delete, MapDelete) \
+ V(Map.prototype, entries, MapEntries) \
+ V(Map.prototype, forEach, MapForEach) \
+ V(Map.prototype, has, MapHas) \
+ V(Map.prototype, keys, MapKeys) \
+ V(Map.prototype, set, MapSet) \
+ V(Map.prototype, values, MapValues) \
+ V(Set.prototype, add, SetAdd) \
+ V(Set.prototype, clear, SetClear) \
+ V(Set.prototype, delete, SetDelete) \
+ V(Set.prototype, entries, SetEntries) \
+ V(Set.prototype, forEach, SetForEach) \
+ V(Set.prototype, has, SetHas) \
+ V(Set.prototype, keys, SetKeys) \
+ V(Set.prototype, values, SetValues) \
+ V(WeakMap.prototype, delete, WeakMapDelete) \
+ V(WeakMap.prototype, has, WeakMapHas) \
+ V(WeakMap.prototype, set, WeakMapSet) \
+ V(WeakSet.prototype, add, WeakSetAdd) \
+ V(WeakSet.prototype, delete, WeakSetDelete) \
+ V(WeakSet.prototype, has, WeakSetHas)
#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
V(Atomics, load, AtomicsLoad) \
@@ -7118,10 +6937,10 @@ enum BuiltinFunctionId {
};
// Result of searching in an optimized code map of a SharedFunctionInfo. Note
-// that both {code} and {literals} can be NULL to pass search result status.
-struct CodeAndLiterals {
- Code* code; // Cached optimized code.
- LiteralsArray* literals; // Cached literals array.
+// that both {code} and {vector} can be NULL to signal the search result status.
+struct CodeAndVector {
+ Code* code; // Cached optimized code.
+ FeedbackVector* vector; // Cached feedback vector.
};
// SharedFunctionInfo describes the JSFunction information that can be
@@ -7154,11 +6973,7 @@ class SharedFunctionInfo: public HeapObject {
DECL_ACCESSORS(optimized_code_map, FixedArray)
// Returns entry from optimized code map for specified context and OSR entry.
- // Note that {code == nullptr, literals == nullptr} indicates no matching
- // entry has been found, whereas {code, literals == nullptr} indicates that
- // code is context-independent.
- CodeAndLiterals SearchOptimizedCodeMap(Context* native_context,
- BailoutId osr_ast_id);
+ Code* SearchOptimizedCodeMap(Context* native_context, BailoutId osr_ast_id);
// Clear optimized code map.
void ClearOptimizedCodeMap();
@@ -7176,16 +6991,10 @@ class SharedFunctionInfo: public HeapObject {
// the entry itself is left in the map in order to continue sharing literals.
void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
- static Handle<LiteralsArray> FindOrCreateLiterals(
- Handle<SharedFunctionInfo> shared, Handle<Context> native_context);
-
// Add or update entry in the optimized code map for context-dependent code.
- // If {code} is not given, then an existing entry's code won't be overwritten.
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
- MaybeHandle<Code> code,
- Handle<LiteralsArray> literals,
- BailoutId osr_ast_id);
+ Handle<Code> code, BailoutId osr_ast_id);
// Set up the link between shared function info and the script. The shared
// function info is added to the list on the script.
@@ -7196,8 +7005,7 @@ class SharedFunctionInfo: public HeapObject {
static const int kEntriesStart = 0;
static const int kContextOffset = 0;
static const int kCachedCodeOffset = 1;
- static const int kLiteralsOffset = 2;
- static const int kEntryLength = 3;
+ static const int kEntryLength = 2;
static const int kInitialLength = kEntriesStart + kEntryLength;
static const int kNotFound = -1;
@@ -7209,8 +7017,6 @@ class SharedFunctionInfo: public HeapObject {
static const int kOffsetToPreviousCachedCode =
FixedArray::kHeaderSize +
kPointerSize * (kCachedCodeOffset - kEntryLength);
- static const int kOffsetToPreviousLiterals =
- FixedArray::kHeaderSize + kPointerSize * (kLiteralsOffset - kEntryLength);
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
@@ -7279,12 +7085,12 @@ class SharedFunctionInfo: public HeapObject {
inline bool IsApiFunction();
inline FunctionTemplateInfo* get_api_func_data();
inline void set_api_func_data(FunctionTemplateInfo* data);
- inline bool HasBytecodeArray();
- inline BytecodeArray* bytecode_array();
+ inline bool HasBytecodeArray() const;
+ inline BytecodeArray* bytecode_array() const;
inline void set_bytecode_array(BytecodeArray* bytecode);
inline void ClearBytecodeArray();
- inline bool HasAsmWasmData();
- inline FixedArray* asm_wasm_data();
+ inline bool HasAsmWasmData() const;
+ inline FixedArray* asm_wasm_data() const;
inline void set_asm_wasm_data(FixedArray* data);
inline void ClearAsmWasmData();
@@ -7309,10 +7115,6 @@ class SharedFunctionInfo: public HeapObject {
// [script]: Script from which the function originates.
DECL_ACCESSORS(script, Object)
- // [num_literals]: Number of literals used by this function.
- inline int num_literals() const;
- inline void set_num_literals(int value);
-
// [start_position_and_type]: Field used to store both the source code
// position, whether or not the function is a function expression,
// and whether or not the function is a toplevel function. The two
@@ -7322,15 +7124,47 @@ class SharedFunctionInfo: public HeapObject {
inline void set_start_position_and_type(int value);
// The function is subject to debugging if a debug info is attached.
- inline bool HasDebugInfo();
- inline DebugInfo* GetDebugInfo();
+ inline bool HasDebugInfo() const;
+ inline DebugInfo* GetDebugInfo() const;
// A function has debug code if the compiled code has debug break slots.
- inline bool HasDebugCode();
+ inline bool HasDebugCode() const;
// [debug info]: Debug information.
DECL_ACCESSORS(debug_info, Object)
+ // Bit field containing various information collected for debugging.
+ // This field is either stored in the kDebugInfo slot or inside the
+ // debug info struct.
+ inline int debugger_hints() const;
+ inline void set_debugger_hints(int value);
+
+ // Indicates that the function was created by the Function function.
+ // Though it's anonymous, toString should treat it as if it had the name
+ // "anonymous". We don't set the name itself so that the system does not
+ // see a binding for it.
+ DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
+
+ // Indicates that the function is either an anonymous expression
+ // or an arrow function (the name field can be set through the API,
+ // which does not change this flag).
+ DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
+
+ // Indicates that the shared function info is deserialized from cache.
+ DECL_BOOLEAN_ACCESSORS(deserialized)
+
+ // Indicates that the function cannot cause side-effects.
+ DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
+
+ // Indicates that |has_no_side_effect| has been computed and set.
+ DECL_BOOLEAN_ACCESSORS(computed_has_no_side_effect)
+
+ // Indicates that the function should be skipped during stepping.
+ DECL_BOOLEAN_ACCESSORS(debug_is_blackboxed)
+
+ // Indicates that |debug_is_blackboxed| has been computed and set.
+ DECL_BOOLEAN_ACCESSORS(computed_debug_is_blackboxed)
+
// The function's name if it is non-empty, otherwise the inferred name.
String* DebugName();
@@ -7409,17 +7243,6 @@ class SharedFunctionInfo: public HeapObject {
// Indicate that this function should always be inlined in optimized code.
DECL_BOOLEAN_ACCESSORS(force_inline)
- // Indicates that the function was created by the Function function.
- // Though it's anonymous, toString should treat it as if it had the name
- // "anonymous". We don't set the name itself so that the system does not
- // see a binding for it.
- DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
-
- // Indicates that the function is either an anonymous expression
- // or an arrow function (the name field can be set through the API,
- // which does not change this flag).
- DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
-
// Indicates that code for this function must be compiled through the
// Ignition / TurboFan pipeline, and is unsupported by
// FullCodegen / Crankshaft.
@@ -7431,9 +7254,6 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that this function is an asm function.
DECL_BOOLEAN_ACCESSORS(asm_function)
- // Indicates that the the shared function info is deserialized from cache.
- DECL_BOOLEAN_ACCESSORS(deserialized)
-
// Whether this function was created from a FunctionDeclaration.
DECL_BOOLEAN_ACCESSORS(is_declaration)
@@ -7443,12 +7263,6 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that asm->wasm conversion failed and should not be re-attempted.
DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
- // Indicates that the function cannot cause side-effects.
- DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
-
- // Indicates that |has_no_side_effect| has been computed and set.
- DECL_BOOLEAN_ACCESSORS(computed_has_no_side_effect)
-
inline FunctionKind kind() const;
inline void set_kind(FunctionKind kind);
@@ -7473,6 +7287,7 @@ class SharedFunctionInfo: public HeapObject {
// [source code]: Source code for the function.
bool HasSourceCode() const;
Handle<Object> GetSourceCode();
+ Handle<Object> GetSourceCodeHarmony();
// Number of times the function was optimized.
inline int opt_count();
@@ -7723,23 +7538,32 @@ class SharedFunctionInfo: public HeapObject {
// byte 1
kForceInline,
kIsAsmFunction,
- kIsAnonymousExpression,
- kNameShouldPrintAsAnonymous,
kMustUseIgnitionTurbo,
kDontFlush,
kIsDeclaration,
+ kIsAsmWasmBroken,
+
+ kUnused1, // Unused fields.
+ kUnused2,
- kUnused, // unused.
// byte 2
kFunctionKind,
// rest of byte 2 and first two bits of byte 3 are used by FunctionKind
// byte 3
- kDeserialized = kFunctionKind + 10,
- kIsAsmWasmBroken,
+ kCompilerHintsCount = kFunctionKind + 10, // Pseudo entry
+ };
+
+ // Bit positions in debugger_hints.
+ enum DebuggerHints {
+ kIsAnonymousExpression,
+ kNameShouldPrintAsAnonymous,
+ kDeserialized,
kHasNoSideEffect,
kComputedHasNoSideEffect,
- kCompilerHintsCount, // Pseudo entry
+ kDebugIsBlackboxed,
+ kComputedDebugIsBlackboxed,
};
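A sketch of how a flag moved into debugger_hints would decode, assuming
DECL_BOOLEAN_ACCESSORS packs one bit per enumerator as it does for compiler
hints:

  bool blackboxed =
      (shared->debugger_hints() >> SharedFunctionInfo::kDebugIsBlackboxed) & 1;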
+
// kFunctionKind has to be byte-aligned
STATIC_ASSERT((kFunctionKind % kBitsPerByte) == 0);
@@ -8096,6 +7920,7 @@ class JSFunction: public JSObject {
// [context]: The context for this function.
inline Context* context();
+ inline bool has_context() const;
inline void set_context(Object* context);
inline JSObject* global_proxy();
inline Context* native_context();
@@ -8149,26 +7974,25 @@ class JSFunction: public JSObject {
// Completes inobject slack tracking on initial map if it is active.
inline void CompleteInobjectSlackTrackingIfActive();
- // [literals]: Fixed array holding the materialized literals.
- //
- // If the function contains object, regexp or array literals, the
- // literals array prefix contains the object, regexp, and array
- // function to be used when creating these literals. This is
- // necessary so that we do not dynamically lookup the object, regexp
- // or array functions. Performing a dynamic lookup, we might end up
- // using the functions from a new context that we should not have
- // access to. For API objects we store the boilerplate in the literal array.
- DECL_ACCESSORS(literals, LiteralsArray)
+ // [feedback_vector_cell]: Fixed array holding the feedback vector.
+ DECL_ACCESSORS(feedback_vector_cell, Cell)
+
+ enum FeedbackVectorState {
+ TOP_LEVEL_SCRIPT_NEEDS_VECTOR,
+ NEEDS_VECTOR,
+ HAS_VECTOR
+ };
+
+ inline FeedbackVectorState GetFeedbackVectorState(Isolate* isolate) const;
+ // feedback_vector() can be used once the function is compiled.
+ inline FeedbackVector* feedback_vector() const;
+ inline bool has_feedback_vector() const;
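A plausible reading of the three states (an assumption; the authoritative
classification lives in the inline definitions, which this diff does not
include): the first two mean the cell does not yet hold a FeedbackVector,
while HAS_VECTOR means feedback_vector() is safe to call. A caller might
ensure a vector via EnsureLiterals, declared just below (|function| assumed
to be a Handle<JSFunction>):

  if (function->GetFeedbackVectorState(isolate) != JSFunction::HAS_VECTOR) {
    JSFunction::EnsureLiterals(function);  // installs a vector in the cell
  }
  DCHECK(function->has_feedback_vector());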
static void EnsureLiterals(Handle<JSFunction> function);
- inline FeedbackVector* feedback_vector();
- // Unconditionally clear the type feedback vector (including vector ICs).
+ // Unconditionally clear the type feedback vector.
void ClearTypeFeedbackInfo();
- // Clear the type feedback vector with a more subtle policy at GC time.
- void ClearTypeFeedbackInfoAtGCTime();
-
// The initial map for an object created by this constructor.
inline Map* initial_map();
static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
@@ -8277,8 +8101,9 @@ class JSFunction: public JSObject {
static const int kSharedFunctionInfoOffset =
kPrototypeOrInitialMapOffset + kPointerSize;
static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
- static const int kLiteralsOffset = kContextOffset + kPointerSize;
- static const int kNonWeakFieldsEndOffset = kLiteralsOffset + kPointerSize;
+ static const int kFeedbackVectorOffset = kContextOffset + kPointerSize;
+ static const int kNonWeakFieldsEndOffset =
+ kFeedbackVectorOffset + kPointerSize;
static const int kCodeEntryOffset = kNonWeakFieldsEndOffset;
static const int kNextFunctionLinkOffset = kCodeEntryOffset + kPointerSize;
static const int kSize = kNextFunctionLinkOffset + kPointerSize;
@@ -8584,13 +8409,15 @@ class JSPromise : public JSObject {
// 1) Undefined -- This is the zero state when there is no callback
// or deferred fields registered.
//
- // 2) Object -- There is a single Callable directly attached to the
+ // 2) Object -- There is a single callback directly attached to the
// fulfill_reactions, reject_reactions and the deferred fields are
// directly attached to the slots. In this state, deferred_promise
// is a JSReceiver and deferred_on_{resolve, reject} are Callables.
//
// 3) FixedArray -- There is more than one callback and deferred
// fields attached to a FixedArray.
+ //
+ // The callback can be a Callable or a Symbol.
DECL_ACCESSORS(deferred_promise, Object)
DECL_ACCESSORS(deferred_on_resolve, Object)
DECL_ACCESSORS(deferred_on_reject, Object)
@@ -8805,9 +8632,25 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
static const int kPrefixSize = 0;
- static const int kEntrySize = 2;
+ static const int kEntrySize = 3;
};
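With kEntrySize raised from 2 to 3, each compilation cache entry spans three
consecutive slots; the layout implied by the PutEval change near the top of
this diff (EntryToIndex(entry) + 0/1/2) is:

  // [EntryToIndex(e) + 0]  key (e.g. a StringSharedKey for script/eval)
  // [EntryToIndex(e) + 1]  value (SharedFunctionInfo, code, or regexp data)
  // [EntryToIndex(e) + 2]  literals map (native context -> vector Cell)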
+class InfoVectorPair {
+ public:
+ InfoVectorPair() : shared_(nullptr), vector_cell_(nullptr) {}
+ InfoVectorPair(SharedFunctionInfo* shared, Cell* vector_cell)
+ : shared_(shared), vector_cell_(vector_cell) {}
+
+ SharedFunctionInfo* shared() const { return shared_; }
+ Cell* vector() const { return vector_cell_; }
+
+ bool has_shared() const { return shared_ != nullptr; }
+ bool has_vector() const { return vector_cell_ != nullptr; }
+
+ private:
+ SharedFunctionInfo* shared_;
+ Cell* vector_cell_;
+};
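A sketch of consuming the pair-returning lookups declared just below (assumes
|table|, |src|, |context|, |language_mode| and |isolate| are in scope):

  InfoVectorPair pair = table->LookupScript(src, context, language_mode);
  if (pair.has_shared()) {
    Handle<SharedFunctionInfo> shared(pair.shared(), isolate);
    if (pair.has_vector()) {
      Handle<Cell> vector_cell(pair.vector(), isolate);  // cached vector cell
    }
  }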
// This cache is used in two different variants. For regexp caching, it simply
// maps identifying info of the regexp to the cached regexp object. Scripts and
@@ -8827,18 +8670,25 @@ class CompilationCacheTable: public HashTable<CompilationCacheTable,
// Find cached value for a string key, otherwise return null.
Handle<Object> Lookup(
Handle<String> src, Handle<Context> context, LanguageMode language_mode);
- Handle<Object> LookupEval(
- Handle<String> src, Handle<SharedFunctionInfo> shared,
- LanguageMode language_mode, int scope_position);
+ InfoVectorPair LookupScript(Handle<String> src, Handle<Context> context,
+ LanguageMode language_mode);
+ InfoVectorPair LookupEval(Handle<String> src,
+ Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ LanguageMode language_mode, int position);
Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
static Handle<CompilationCacheTable> Put(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> context, LanguageMode language_mode,
Handle<Object> value);
+ static Handle<CompilationCacheTable> PutScript(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> context, LanguageMode language_mode,
+ Handle<SharedFunctionInfo> value, Handle<Cell> literals);
static Handle<CompilationCacheTable> PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<SharedFunctionInfo> context, Handle<SharedFunctionInfo> value,
- int scope_position);
+ Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
+ Handle<Context> native_context, Handle<Cell> literals, int position);
static Handle<CompilationCacheTable> PutRegExp(
Handle<CompilationCacheTable> cache, Handle<String> src,
JSRegExp::Flags flags, Handle<FixedArray> value);
@@ -9220,6 +9070,7 @@ class StringShape BASE_EMBEDDED {
inline bool IsExternal();
inline bool IsCons();
inline bool IsSliced();
+ inline bool IsThin();
inline bool IsIndirect();
inline bool IsExternalOneByte();
inline bool IsExternalTwoByte();
@@ -9932,6 +9783,34 @@ class ConsString: public String {
DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
};
+// The ThinString class describes string objects that are just references
+// to another string object. They are used for in-place internalization when
+// the original string cannot actually be internalized in-place: in these
+// cases, the original string is converted to a ThinString pointing at its
+// internalized version (which is allocated as a new object).
+// In terms of memory layout and most algorithms operating on strings,
+// ThinStrings can be thought of as "one-part cons strings".
+class ThinString : public String {
+ public:
+ // Actual string that this ThinString refers to.
+ inline String* actual() const;
+ inline void set_actual(String* s,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ V8_EXPORT_PRIVATE uint16_t ThinStringGet(int index);
+
+ DECLARE_CAST(ThinString)
+ DECLARE_VERIFIER(ThinString)
+
+ // Layout description.
+ static const int kActualOffset = String::kSize;
+ static const int kSize = kActualOffset + kPointerSize;
+
+ typedef FixedBodyDescriptor<kActualOffset, kSize, kSize> BodyDescriptor;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ThinString);
+};
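A sketch of the "one-hop forwarding" reading of ThinString (Unwrap is a
hypothetical helper, not part of this patch):

  String* Unwrap(String* s) {
    if (StringShape(s).IsThin()) {           // new predicate in this patch
      return ThinString::cast(s)->actual();  // the internalized target
    }
    return s;
  }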
// The Sliced String class describes strings that are substrings of another
// sequential string. The motivation is to save time and memory when creating
@@ -10568,6 +10447,32 @@ class JSArrayIterator : public JSObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayIterator);
};
+// The [Async-from-Sync Iterator] object
+// (proposal-async-iteration/#sec-async-from-sync-iterator-objects)
+// An object which wraps an ordinary Iterator and converts it to behave
+// according to the Async Iterator protocol.
+// (See https://tc39.github.io/proposal-async-iteration/#sec-iteration)
+class JSAsyncFromSyncIterator : public JSObject {
+ public:
+ DECLARE_CAST(JSAsyncFromSyncIterator)
+ DECLARE_PRINTER(JSAsyncFromSyncIterator)
+ DECLARE_VERIFIER(JSAsyncFromSyncIterator)
+
+ // Async-from-Sync Iterator instances are ordinary objects that inherit
+ // properties from the %AsyncFromSyncIteratorPrototype% intrinsic object.
+ // Async-from-Sync Iterator instances are initially created with the internal
+ // slots listed in Table 4.
+ // (proposal-async-iteration/#table-async-from-sync-iterator-internal-slots)
+ DECL_ACCESSORS(sync_iterator, JSReceiver)
+
+ // Offsets of object fields.
+ static const int kSyncIteratorOffset = JSObject::kHeaderSize;
+ static const int kSize = kSyncIteratorOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncFromSyncIterator);
+};
+
class JSStringIterator : public JSObject {
public:
// Dispatched behavior.
@@ -10713,6 +10618,8 @@ class JSWeakCollection: public JSObject {
Handle<Object> value, int32_t hash);
static bool Delete(Handle<JSWeakCollection> collection, Handle<Object> key,
int32_t hash);
+ static Handle<JSArray> GetEntries(Handle<JSWeakCollection> holder,
+ int max_entries);
static const int kTableOffset = JSObject::kHeaderSize;
static const int kNextOffset = kTableOffset + kPointerSize;
@@ -10804,10 +10711,12 @@ class JSArrayBuffer: public JSObject {
void* data, size_t allocated_length,
SharedFlag shared = SharedFlag::kNotShared);
- static bool SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
- Isolate* isolate, size_t allocated_length,
- bool initialize = true,
- SharedFlag shared = SharedFlag::kNotShared);
+ // Returns false if array buffer contents could not be allocated.
+ // In this case, |array_buffer| will not be set up.
+ static bool SetupAllocatingData(
+ Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
+ size_t allocated_length, bool initialize = true,
+ SharedFlag shared = SharedFlag::kNotShared) WARN_UNUSED_RESULT;
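Per the new WARN_UNUSED_RESULT contract, callers must consume the result
(sketch; assumes |buffer| is a freshly allocated, not-yet-set-up JSArrayBuffer
handle):

  if (!JSArrayBuffer::SetupAllocatingData(buffer, isolate, byte_length)) {
    // |buffer| was not set up; a caller would typically raise a RangeError
    // for an oversized or failed allocation.
  }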
// Dispatched behavior.
DECLARE_PRINTER(JSArrayBuffer)
@@ -10891,6 +10800,10 @@ class JSTypedArray: public JSArrayBufferView {
Handle<JSArrayBuffer> GetBuffer();
+ static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
+ Handle<Object> receiver,
+ const char* method_name);
+
// Dispatched behavior.
DECLARE_PRINTER(JSTypedArray)
DECLARE_VERIFIER(JSTypedArray)
@@ -11446,6 +11359,9 @@ class DebugInfo: public Struct {
// The shared function info for the source being debugged.
DECL_ACCESSORS(shared, SharedFunctionInfo)
+ // Bit field containing various information collected for debugging.
+ DECL_INT_ACCESSORS(debugger_hints)
+
DECL_ACCESSORS(debug_bytecode_array, Object)
// Fixed array holding status information for each active break point.
DECL_ACCESSORS(break_points, FixedArray)
@@ -11466,8 +11382,6 @@ class DebugInfo: public Struct {
// Get the number of break points for this function.
int GetBreakPointCount();
- static Smi* uninitialized() { return Smi::kZero; }
-
inline bool HasDebugBytecodeArray();
inline bool HasDebugCode();
@@ -11482,8 +11396,10 @@ class DebugInfo: public Struct {
DECLARE_VERIFIER(DebugInfo)
static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
- static const int kDebugBytecodeArrayIndex =
+ static const int kDebuggerHintsIndex =
kSharedFunctionInfoIndex + kPointerSize;
+ static const int kDebugBytecodeArrayIndex =
+ kDebuggerHintsIndex + kPointerSize;
static const int kBreakPointsStateIndex =
kDebugBytecodeArrayIndex + kPointerSize;
static const int kSize = kBreakPointsStateIndex + kPointerSize;
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
new file mode 100644
index 0000000000..551b03621e
--- /dev/null
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -0,0 +1,55 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/literal-objects.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Object* BoilerplateDescription::name(int index) const {
+ // get() already checks for out-of-bounds access, but we do not want to
+ // allow access to the last element if it is the number of properties.
+ DCHECK_NE(size(), index);
+ return get(2 * index);
+}
+
+Object* BoilerplateDescription::value(int index) const {
+ return get(2 * index + 1);
+}
+
+int BoilerplateDescription::size() const {
+ DCHECK_EQ(0, (length() - (this->has_number_of_properties() ? 1 : 0)) % 2);
+ // Rounding down (integer division) is intended.
+ return length() / 2;
+}
+
+int BoilerplateDescription::backing_store_size() const {
+ if (has_number_of_properties()) {
+ // If present, the last entry contains the number of properties.
+ return Smi::cast(this->get(length() - 1))->value();
+ }
+ // If the number is not given explicitly, we assume there are no
+ // properties with computed names.
+ return size();
+}
+
+void BoilerplateDescription::set_backing_store_size(Isolate* isolate,
+ int backing_store_size) {
+ DCHECK(has_number_of_properties());
+ DCHECK_NE(size(), backing_store_size);
+ Handle<Object> backing_store_size_obj =
+ isolate->factory()->NewNumberFromInt(backing_store_size);
+ set(length() - 1, *backing_store_size_obj);
+}
+
+bool BoilerplateDescription::has_number_of_properties() const {
+ return length() % 2 != 0;
+}
+
+} // namespace internal
+} // namespace v8
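
The parity trick above is worth spelling out: an even-length backing array holds only name/value pairs, while an odd length means the final slot carries the projected backing-store size. A stand-alone sketch of the same rules; BoilerplateSketch and std::vector<int> are illustrative stand-ins, not V8 types:

    #include <cassert>
    #include <vector>

    // Stand-alone sketch of BoilerplateDescription's layout rules, with a
    // std::vector<int> standing in for the FixedArray of tagged values.
    struct BoilerplateSketch {
      std::vector<int> slots;  // [name0, value0, name1, value1, (count)?]

      bool has_number_of_properties() const { return slots.size() % 2 != 0; }

      int size() const {  // number of name/value pairs; rounding is intended
        return static_cast<int>(slots.size()) / 2;
      }

      int backing_store_size() const {
        // If present, the last slot holds the projected property count,
        // which also covers properties with computed names.
        return has_number_of_properties() ? slots.back() : size();
      }
    };

    int main() {
      BoilerplateSketch even{{/*name*/ 1, /*value*/ 2, 3, 4}};
      assert(even.size() == 2 && even.backing_store_size() == 2);

      BoilerplateSketch odd{{1, 2, 3, 4, /*count*/ 5}};
      assert(odd.size() == 2 && odd.backing_store_size() == 5);
    }
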
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
new file mode 100644
index 0000000000..fdd321e74e
--- /dev/null
+++ b/deps/v8/src/objects/literal-objects.h
@@ -0,0 +1,67 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_LITERAL_OBJECTS_H_
+#define V8_OBJECTS_LITERAL_OBJECTS_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// BoilerplateDescription is a list of properties consisting of name/value
+// pairs. In addition to the properties, it provides the projected number
+// of properties in the backing store. This number includes properties with
+// computed names that are not in the list.
+class BoilerplateDescription : public FixedArray {
+ public:
+ Object* name(int index) const;
+ Object* value(int index) const;
+
+ // The number of boilerplate properties.
+ int size() const;
+
+ // Number of boilerplate properties and properties with computed names.
+ int backing_store_size() const;
+
+ void set_backing_store_size(Isolate* isolate, int backing_store_size);
+
+ DECLARE_CAST(BoilerplateDescription)
+
+ private:
+ bool has_number_of_properties() const;
+};
+
+// Pair of {ElementsKind} and an array of constant values for {ArrayLiteral}
+// expressions. Used to communicate with the runtime for literal boilerplate
+// creation within the {Runtime_CreateArrayLiteral} method.
+class ConstantElementsPair : public Struct {
+ public:
+ DECL_INT_ACCESSORS(elements_kind)
+ DECL_ACCESSORS(constant_values, FixedArrayBase)
+
+ DECLARE_CAST(ConstantElementsPair)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(ConstantElementsPair)
+ DECLARE_VERIFIER(ConstantElementsPair)
+
+ static const int kElementsKindOffset = HeapObject::kHeaderSize;
+ static const int kConstantValuesOffset = kElementsKindOffset + kPointerSize;
+ static const int kSize = kConstantValuesOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantElementsPair);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_LITERAL_OBJECTS_H_
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
new file mode 100644
index 0000000000..327ded3247
--- /dev/null
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -0,0 +1,76 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_REGEXP_MATCH_INFO_H_
+#define V8_OBJECTS_REGEXP_MATCH_INFO_H_
+
+#include "src/base/compiler-specific.h"
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class Object;
+class String;
+
+// RegExpMatchInfo holds the matchIndices array of the last successful
+// regexp match (an array of start/end index pairs for the match and all
+// the captured substrings); the invariant is that there are at least two
+// capture indices. The array also contains the subject string of the last
+// successful match. After creation, the result must be treated as a
+// FixedArray in all regards.
+class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
+ public:
+ // Returns the number of captures, which is defined as the length of the
+  // matchIndices object of the last match. matchIndices contains two indices
+ // for each capture (including the match itself), i.e. 2 * #captures + 2.
+ inline int NumberOfCaptureRegisters();
+ inline void SetNumberOfCaptureRegisters(int value);
+
+ // Returns the subject string of the last match.
+ inline String* LastSubject();
+ inline void SetLastSubject(String* value);
+
+ // Like LastSubject, but modifiable by the user.
+ inline Object* LastInput();
+ inline void SetLastInput(Object* value);
+
+  // Returns the i'th capture index, 0 <= i < NumberOfCaptureRegisters().
+  // Capture(0) and Capture(1) determine the start and end of the match itself.
+ inline int Capture(int i);
+ inline void SetCapture(int i, int value);
+
+ // Reserves space for captures.
+ static Handle<RegExpMatchInfo> ReserveCaptures(
+ Handle<RegExpMatchInfo> match_info, int capture_count);
+
+ DECLARE_CAST(RegExpMatchInfo)
+
+ static const int kNumberOfCapturesIndex = 0;
+ static const int kLastSubjectIndex = 1;
+ static const int kLastInputIndex = 2;
+ static const int kFirstCaptureIndex = 3;
+ static const int kLastMatchOverhead = kFirstCaptureIndex;
+
+ static const int kNumberOfCapturesOffset = FixedArray::kHeaderSize;
+ static const int kLastSubjectOffset = kNumberOfCapturesOffset + kPointerSize;
+ static const int kLastInputOffset = kLastSubjectOffset + kPointerSize;
+ static const int kFirstCaptureOffset = kLastInputOffset + kPointerSize;
+
+ // Every match info is guaranteed to have enough space to store two captures.
+ static const int kInitialCaptureIndices = 2;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMatchInfo);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_REGEXP_MATCH_INFO_H_
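
The constants above pin down a simple addressing scheme: the value of capture register i lives at slot kFirstCaptureIndex + i, and a match stores 2 * #captures + 2 registers in total. A small sketch of that arithmetic, independent of the V8 heap types:

    #include <cassert>

    // Sketch of RegExpMatchInfo's addressing scheme; kFirstCaptureIndex is
    // copied from the header above, the rest is illustrative arithmetic.
    constexpr int kFirstCaptureIndex = 3;

    // Capture register i's value lives at array slot kFirstCaptureIndex + i.
    constexpr int CaptureSlot(int i) { return kFirstCaptureIndex + i; }

    int main() {
      // A regexp with one capturing group stores registers for the match
      // itself plus the group: NumberOfCaptureRegisters == 2 * 1 + 2 == 4,
      // occupying slots 3..6 of the underlying FixedArray.
      constexpr int number_of_capture_registers = 2 * 1 + 2;
      static_assert(number_of_capture_registers == 4, "2 * #captures + 2");
      assert(CaptureSlot(0) == 3 && CaptureSlot(3) == 6);
    }
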
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 671734e05a..75a374d5d8 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_SCOPE_INFO_H_
#include "src/globals.h"
-#include "src/handles.h"
#include "src/objects.h"
#include "src/utils.h"
@@ -16,7 +15,11 @@
namespace v8 {
namespace internal {
+template <typename T>
+class Handle;
class Isolate;
+template <typename T>
+class MaybeHandle;
class Scope;
class Zone;
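
Dropping the handles.h include works because this header only names Handle<T> and MaybeHandle<T> in declarations; a forward declaration of the template is enough for that. The idiom in isolation (ScopeInfoLike is a made-up example, not a V8 class):

    // Sketch of the include-trimming idiom applied to scope-info.h above:
    // forward-declaring a class template suffices when the header uses it
    // only in declarations.
    template <typename T>
    class Handle;

    class ScopeInfoLike {
     public:
      // Fine with only the forward declaration: the parameter type must be
      // complete at the call site and in the definition, not here.
      static void Analyze(Handle<int> info);
    };
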
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index ee49f9c366..5f136aae42 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -3,6 +3,7 @@ set noparent
adamk@chromium.org
littledan@chromium.org
marja@chromium.org
+neis@chromium.org
rossberg@chromium.org
verwaest@chromium.org
vogelheim@chromium.org
diff --git a/deps/v8/src/parsing/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index cc9204bb6d..9eea4a8338 100644
--- a/deps/v8/src/parsing/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -5,7 +5,6 @@
#ifndef V8_PARSING_FUNC_NAME_INFERRER_H_
#define V8_PARSING_FUNC_NAME_INFERRER_H_
-#include "src/handles.h"
#include "src/zone/zone.h"
namespace v8 {
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index b703d3e924..37dca662bc 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -4,15 +4,19 @@
#include "src/parsing/parse-info.h"
+#include "src/api.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/scope-info.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
-ParseInfo::ParseInfo(Zone* zone)
- : zone_(zone),
+ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
+ : zone_(std::make_shared<Zone>(zone_allocator, ZONE_NAME)),
flags_(0),
source_stream_(nullptr),
source_stream_encoding_(ScriptCompiler::StreamedSource::ONE_BYTE),
@@ -27,16 +31,18 @@ ParseInfo::ParseInfo(Zone* zone)
compiler_hints_(0),
start_position_(0),
end_position_(0),
+ parameters_end_pos_(kNoSourcePosition),
function_literal_id_(FunctionLiteral::kIdTypeInvalid),
max_function_literal_id_(FunctionLiteral::kIdTypeInvalid),
isolate_(nullptr),
cached_data_(nullptr),
ast_value_factory_(nullptr),
function_name_(nullptr),
- literal_(nullptr) {}
+ literal_(nullptr),
+ deferred_handles_(nullptr) {}
-ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
- : ParseInfo(zone) {
+ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
+ : ParseInfo(shared->GetIsolate()->allocator()) {
isolate_ = shared->GetIsolate();
set_toplevel(shared->is_toplevel());
@@ -66,7 +72,14 @@ ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
}
}
-ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
+ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared,
+ std::shared_ptr<Zone> zone)
+ : ParseInfo(shared) {
+ zone_.swap(zone);
+}
+
+ParseInfo::ParseInfo(Handle<Script> script)
+ : ParseInfo(script->GetIsolate()->allocator()) {
isolate_ = script->GetIsolate();
set_allow_lazy_parsing();
@@ -88,6 +101,46 @@ ParseInfo::~ParseInfo() {
ast_value_factory_ = nullptr;
}
+// static
+ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate = shared->GetIsolate();
+ ParseInfo* p = new ParseInfo(isolate->allocator());
+ p->isolate_ = isolate;
+
+ p->set_toplevel(shared->is_toplevel());
+ p->set_allow_lazy_parsing(FLAG_lazy_inner_functions);
+ p->set_hash_seed(isolate->heap()->HashSeed());
+ p->set_is_named_expression(shared->is_named_expression());
+ p->set_calls_eval(shared->scope_info()->CallsEval());
+ p->set_compiler_hints(shared->compiler_hints());
+ p->set_start_position(shared->start_position());
+ p->set_end_position(shared->end_position());
+ p->function_literal_id_ = shared->function_literal_id();
+ p->set_stack_limit(isolate->stack_guard()->real_climit());
+ p->set_unicode_cache(isolate->unicode_cache());
+ p->set_language_mode(shared->language_mode());
+ p->set_shared_info(shared);
+ p->set_module(shared->kind() == FunctionKind::kModule);
+
+ // BUG(5946): This function exists as a workaround until we can
+ // get rid of %SetCode in our native functions. The ParseInfo
+ // is explicitly set up for the case that:
+ // a) you have a native built-in,
+ // b) it's being run for the 2nd-Nth time in an isolate,
+ // c) we've already compiled bytecode and therefore don't need
+ // to parse.
+ // We tolerate a ParseInfo without a Script in this case.
+ p->set_native(true);
+ p->set_eval(false);
+
+ Handle<HeapObject> scope_info(shared->outer_scope_info());
+ if (!scope_info->IsTheHole(isolate) &&
+ Handle<ScopeInfo>::cast(scope_info)->length() > 0) {
+ p->set_outer_scope_info(Handle<ScopeInfo>::cast(scope_info));
+ }
+ return p;
+}
+
DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
bool ParseInfo::is_declaration() const {
@@ -98,6 +151,17 @@ FunctionKind ParseInfo::function_kind() const {
return SharedFunctionInfo::FunctionKindBits::decode(compiler_hints_);
}
+void ParseInfo::set_deferred_handles(
+ std::shared_ptr<DeferredHandles> deferred_handles) {
+ DCHECK(deferred_handles_.get() == nullptr);
+ deferred_handles_.swap(deferred_handles);
+}
+
+void ParseInfo::set_deferred_handles(DeferredHandles* deferred_handles) {
+ DCHECK(deferred_handles_.get() == nullptr);
+ deferred_handles_.reset(deferred_handles);
+}
+
#ifdef DEBUG
bool ParseInfo::script_is_native() const {
return script_->type() == Script::TYPE_NATIVE;
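
The constructor changes above move ParseInfo from a borrowed Zone* to owning its zone through std::shared_ptr, so a caller can either let ParseInfo allocate a fresh zone or hand over shared ownership of an existing one (the constructor taking a std::shared_ptr<Zone> swaps it in). A reduced sketch of that ownership pattern; ParseInfoSketch and the empty Zone are stand-ins, and allocator plumbing is omitted:

    #include <memory>

    struct Zone {};  // stand-in for v8::internal::Zone

    class ParseInfoSketch {
     public:
      ParseInfoSketch() : zone_(std::make_shared<Zone>()) {}

      // Variant mirroring ParseInfo(shared, zone): adopt a caller's zone so
      // several compilation stages can keep the same arena alive.
      explicit ParseInfoSketch(std::shared_ptr<Zone> zone) : ParseInfoSketch() {
        zone_.swap(zone);
      }

      Zone* zone() const { return zone_.get(); }
      std::shared_ptr<Zone> zone_shared() const { return zone_; }

     private:
      std::shared_ptr<Zone> zone_;
    };

    int main() {
      ParseInfoSketch outer;
      // Both objects now share one zone; it dies with the last owner.
      ParseInfoSketch inner(outer.zone_shared());
      return outer.zone() == inner.zone() ? 0 : 1;
    }
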
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 87052a5a7e..4828690609 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -5,10 +5,12 @@
#ifndef V8_PARSING_PARSE_INFO_H_
#define V8_PARSING_PARSE_INFO_H_
+#include <memory>
+
#include "include/v8.h"
#include "src/globals.h"
#include "src/handles.h"
-#include "src/objects/scope-info.h"
+#include "src/parsing/preparsed-scope-data.h"
namespace v8 {
@@ -16,9 +18,11 @@ class Extension;
namespace internal {
+class AccountingAllocator;
class AstRawString;
class AstValueFactory;
class DeclarationScope;
+class DeferredHandles;
class FunctionLiteral;
class ScriptData;
class SharedFunctionInfo;
@@ -29,13 +33,26 @@ class Zone;
// A container for the inputs, configuration options, and outputs of parsing.
class V8_EXPORT_PRIVATE ParseInfo {
public:
- explicit ParseInfo(Zone* zone);
- ParseInfo(Zone* zone, Handle<Script> script);
- ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared);
+ explicit ParseInfo(AccountingAllocator* zone_allocator);
+ ParseInfo(Handle<Script> script);
+ ParseInfo(Handle<SharedFunctionInfo> shared);
+
+ // TODO(rmcilroy): Remove once Hydrogen no longer needs this.
+ ParseInfo(Handle<SharedFunctionInfo> shared, std::shared_ptr<Zone> zone);
~ParseInfo();
- Zone* zone() const { return zone_; }
+ static ParseInfo* AllocateWithoutScript(Handle<SharedFunctionInfo> shared);
+
+ Zone* zone() const { return zone_.get(); }
+
+ std::shared_ptr<Zone> zone_shared() const { return zone_; }
+
+ void set_deferred_handles(std::shared_ptr<DeferredHandles> deferred_handles);
+ void set_deferred_handles(DeferredHandles* deferred_handles);
+ std::shared_ptr<DeferredHandles> deferred_handles() const {
+ return deferred_handles_;
+ }
// Convenience accessor methods for flags.
#define FLAG_ACCESSOR(flag, getter, setter) \
@@ -94,6 +111,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
ScriptData** cached_data() const { return cached_data_; }
void set_cached_data(ScriptData** cached_data) { cached_data_ = cached_data; }
+ PreParsedScopeData* preparsed_scope_data() { return &preparsed_scope_data_; }
+
ScriptCompiler::CompileOptions compile_options() const {
return compile_options_;
}
@@ -150,6 +169,11 @@ class V8_EXPORT_PRIVATE ParseInfo {
int end_position() const { return end_position_; }
void set_end_position(int end_position) { end_position_ = end_position; }
+ int parameters_end_pos() const { return parameters_end_pos_; }
+ void set_parameters_end_pos(int parameters_end_pos) {
+ parameters_end_pos_ = parameters_end_pos;
+ }
+
int function_literal_id() const { return function_literal_id_; }
void set_function_literal_id(int function_literal_id) {
function_literal_id_ = function_literal_id;
@@ -191,8 +215,12 @@ class V8_EXPORT_PRIVATE ParseInfo {
}
void ReopenHandlesInNewHandleScope() {
- shared_ = Handle<SharedFunctionInfo>(*shared_);
- script_ = Handle<Script>(*script_);
+ if (!script_.is_null()) {
+ script_ = Handle<Script>(*script_);
+ }
+ if (!shared_.is_null()) {
+ shared_ = Handle<SharedFunctionInfo>(*shared_);
+ }
Handle<ScopeInfo> outer_scope_info;
if (maybe_outer_scope_info_.ToHandle(&outer_scope_info)) {
maybe_outer_scope_info_ = Handle<ScopeInfo>(*outer_scope_info);
@@ -224,7 +252,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
};
//------------- Inputs to parsing and scope analysis -----------------------
- Zone* zone_;
+ std::shared_ptr<Zone> zone_;
unsigned flags_;
ScriptCompiler::ExternalSourceStream* source_stream_;
ScriptCompiler::StreamedSource::Encoding source_stream_encoding_;
@@ -239,6 +267,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
int compiler_hints_;
int start_position_;
int end_position_;
+ int parameters_end_pos_;
int function_literal_id_;
int max_function_literal_id_;
@@ -250,11 +279,13 @@ class V8_EXPORT_PRIVATE ParseInfo {
//----------- Inputs+Outputs of parsing and scope analysis -----------------
ScriptData** cached_data_; // used if available, populated if requested.
+ PreParsedScopeData preparsed_scope_data_;
AstValueFactory* ast_value_factory_; // used if available, otherwise new.
const AstRawString* function_name_;
//----------- Output of parsing and scope analysis ------------------------
FunctionLiteral* literal_;
+ std::shared_ptr<DeferredHandles> deferred_handles_;
void SetFlag(Flag f) { flags_ |= f; }
void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index dba263390e..cf56c53a8e 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -101,6 +101,12 @@ struct FormalParametersBase {
// Used in functions where the return type is ExpressionT.
#define CHECK_OK CHECK_OK_CUSTOM(EmptyExpression)
+#define CHECK_OK_VOID ok); \
+ if (!*ok) return; \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
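
The unbalanced parentheses in CHECK_OK_VOID are deliberate: the caller's argument list supplies the opening '(', the macro closes it with `ok)`, injects the early return, and leaves `((void)0` open so the caller's own `)` and `;` complete a harmless no-op. A self-contained demonstration of the same trick (Step and Run are made-up stand-ins for parser methods):

    #include <cstdio>

    #define CHECK_OK_VOID ok); \
      if (!*ok) return;        \
      ((void)0

    void Step(int n, bool* ok) { if (n > 1) *ok = false; }

    void Run(bool* ok) {
      Step(1, CHECK_OK_VOID);  // expands to: Step(1, ok); if (!*ok) return; ((void)0);
      Step(2, CHECK_OK_VOID);  // sets *ok = false, so Run returns here
      std::puts("never reached when step 2 fails");
    }

    int main() {
      bool ok = true;
      Run(&ok);
      std::printf("ok = %d\n", ok);  // prints ok = 0
    }
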
// Common base class template shared between parser and pre-parser.
// The Impl parameter is the actual class of the parser/pre-parser,
// following the Curiously Recurring Template Pattern (CRTP).
@@ -195,7 +201,7 @@ class ParserBase {
v8::Extension* extension, AstValueFactory* ast_value_factory,
RuntimeCallStats* runtime_call_stats,
bool parsing_on_main_thread = true)
- : scope_state_(nullptr),
+ : scope_(nullptr),
function_state_(nullptr),
extension_(extension),
fni_(nullptr),
@@ -218,7 +224,10 @@ class ParserBase {
allow_harmony_restrictive_generators_(false),
allow_harmony_trailing_commas_(false),
allow_harmony_class_fields_(false),
- allow_harmony_object_spread_(false) {}
+ allow_harmony_object_rest_spread_(false),
+ allow_harmony_dynamic_import_(false),
+ allow_harmony_async_iteration_(false),
+ allow_harmony_template_escapes_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -231,7 +240,10 @@ class ParserBase {
ALLOW_ACCESSORS(harmony_restrictive_generators);
ALLOW_ACCESSORS(harmony_trailing_commas);
ALLOW_ACCESSORS(harmony_class_fields);
- ALLOW_ACCESSORS(harmony_object_spread);
+ ALLOW_ACCESSORS(harmony_object_rest_spread);
+ ALLOW_ACCESSORS(harmony_dynamic_import);
+ ALLOW_ACCESSORS(harmony_async_iteration);
+ ALLOW_ACCESSORS(harmony_template_escapes);
#undef ALLOW_ACCESSORS
@@ -280,57 +292,26 @@ class ParserBase {
class ObjectLiteralChecker;
// ---------------------------------------------------------------------------
- // ScopeState and its subclasses implement the parser's scope stack.
- // ScopeState keeps track of the current scope, and the outer ScopeState. The
- // parser's scope_state_ points to the top ScopeState. ScopeState's
- // constructor push on the scope stack and the destructors pop. BlockState and
- // FunctionState are used to hold additional per-block and per-function state.
- class ScopeState BASE_EMBEDDED {
+ // BlockState and FunctionState implement the parser's scope stack.
+ // The parser's current scope is in scope_. BlockState and FunctionState
+  // constructors push on the scope stack and the destructors pop. They are
+  // also used to hold the parser's per-function state.
+ class BlockState BASE_EMBEDDED {
public:
- V8_INLINE Scope* scope() const { return scope_; }
- Zone* zone() const { return scope_->zone(); }
-
- protected:
- ScopeState(ScopeState** scope_stack, Scope* scope)
- : scope_stack_(scope_stack), outer_scope_(*scope_stack), scope_(scope) {
- *scope_stack = this;
+ BlockState(Scope** scope_stack, Scope* scope)
+ : scope_stack_(scope_stack), outer_scope_(*scope_stack) {
+ *scope_stack_ = scope;
}
- ~ScopeState() { *scope_stack_ = outer_scope_; }
- private:
- ScopeState** const scope_stack_;
- ScopeState* const outer_scope_;
- Scope* const scope_;
- };
-
- class BlockState final : public ScopeState {
- public:
- BlockState(ScopeState** scope_stack, Scope* scope)
- : ScopeState(scope_stack, scope) {}
+ BlockState(Zone* zone, Scope** scope_stack)
+ : BlockState(scope_stack,
+ new (zone) Scope(zone, *scope_stack, BLOCK_SCOPE)) {}
- // BlockState(ScopeState**) automatically manages Scope(BLOCK_SCOPE)
- // allocation.
- // TODO(verwaest): Move to LazyBlockState class that only allocates the
- // scope when needed.
- explicit BlockState(Zone* zone, ScopeState** scope_stack)
- : ScopeState(scope_stack, NewScope(zone, *scope_stack)) {}
-
- void SetNonlinear() { this->scope()->SetNonlinear(); }
- void set_start_position(int pos) { this->scope()->set_start_position(pos); }
- void set_end_position(int pos) { this->scope()->set_end_position(pos); }
- void set_is_hidden() { this->scope()->set_is_hidden(); }
- Scope* FinalizedBlockScope() const {
- return this->scope()->FinalizeBlockScope();
- }
- LanguageMode language_mode() const {
- return this->scope()->language_mode();
- }
+ ~BlockState() { *scope_stack_ = outer_scope_; }
private:
- Scope* NewScope(Zone* zone, ScopeState* outer_state) {
- Scope* parent = outer_state->scope();
- return new (zone) Scope(zone, parent, BLOCK_SCOPE);
- }
+ Scope** const scope_stack_;
+ Scope* const outer_scope_;
};
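
The slimmed-down BlockState above is a plain RAII stack: the constructor publishes the new scope through the Scope** and remembers the previous top, and the destructor restores it when the block is left. The idiom in isolation (BlockStateSketch mirrors the class above; Scope is reduced to an empty stand-in):

    #include <cassert>

    struct Scope {};

    class BlockStateSketch {
     public:
      BlockStateSketch(Scope** scope_stack, Scope* scope)
          : scope_stack_(scope_stack), outer_scope_(*scope_stack) {
        *scope_stack_ = scope;  // push: the new scope becomes the current one
      }
      ~BlockStateSketch() { *scope_stack_ = outer_scope_; }  // pop on exit

     private:
      Scope** const scope_stack_;
      Scope* const outer_scope_;
    };

    int main() {
      Scope function_scope, block_scope;
      Scope* current = &function_scope;
      {
        BlockStateSketch state(&current, &block_scope);
        assert(current == &block_scope);
      }  // destructor restores the outer scope here
      assert(current == &function_scope);
    }
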
struct DestructuringAssignment {
@@ -392,26 +373,13 @@ class ParserBase {
kInsideForInOfBody,
};
- class FunctionState final : public ScopeState {
+ class FunctionState final : public BlockState {
public:
- FunctionState(FunctionState** function_state_stack,
- ScopeState** scope_stack, DeclarationScope* scope);
+ FunctionState(FunctionState** function_state_stack, Scope** scope_stack,
+ DeclarationScope* scope);
~FunctionState();
- DeclarationScope* scope() const {
- return ScopeState::scope()->AsDeclarationScope();
- }
-
- int NextMaterializedLiteralIndex() {
- return next_materialized_literal_index_++;
- }
- int materialized_literal_count() {
- return next_materialized_literal_index_;
- }
-
- void SkipMaterializedLiterals(int count) {
- next_materialized_literal_index_ += count;
- }
+ DeclarationScope* scope() const { return scope_->AsDeclarationScope(); }
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
@@ -427,6 +395,17 @@ class ParserBase {
return scope()->promise_var();
}
+ void RewindDestructuringAssignments(int pos) {
+ destructuring_assignments_to_rewrite_.Rewind(pos);
+ }
+
+ void SetDestructuringAssignmentsScope(int pos, Scope* scope) {
+ for (int i = pos; i < destructuring_assignments_to_rewrite_.length();
+ ++i) {
+ destructuring_assignments_to_rewrite_[i].scope = scope;
+ }
+ }
+
const ZoneList<DestructuringAssignment>&
destructuring_assignments_to_rewrite() const {
return destructuring_assignments_to_rewrite_;
@@ -471,11 +450,11 @@ class ParserBase {
private:
void AddDestructuringAssignment(DestructuringAssignment pair) {
- destructuring_assignments_to_rewrite_.Add(pair, this->zone());
+ destructuring_assignments_to_rewrite_.Add(pair, scope_->zone());
}
void AddNonPatternForRewriting(ExpressionT expr, bool* ok) {
- non_patterns_to_rewrite_.Add(expr, this->zone());
+ non_patterns_to_rewrite_.Add(expr, scope_->zone());
if (non_patterns_to_rewrite_.length() >=
std::numeric_limits<uint16_t>::max())
*ok = false;
@@ -489,16 +468,9 @@ class ParserBase {
// Properties count estimation.
int expected_property_count_;
- // For generators, this variable may hold the generator object. It variable
- // is used by yield expressions and return statements. It is not necessary
- // for generator functions to have this variable set.
- Variable* generator_object_variable_;
- // For async functions, this variable holds a temporary for the Promise
- // being created as output of the async function.
- Variable* promise_variable_;
-
FunctionState** function_state_stack_;
FunctionState* outer_function_state_;
+ DeclarationScope* scope_;
ZoneList<DestructuringAssignment> destructuring_assignments_to_rewrite_;
TailCallExpressionList tail_call_expressions_;
@@ -592,7 +564,6 @@ class ParserBase {
struct DeclarationDescriptor {
enum Kind { NORMAL, PARAMETER };
Scope* scope;
- Scope* hoist_scope;
VariableMode mode;
int declaration_pos;
int initialization_pos;
@@ -812,6 +783,7 @@ class ParserBase {
bool is_any_identifier(Token::Value token) {
return token == Token::IDENTIFIER || token == Token::ENUM ||
token == Token::AWAIT || token == Token::ASYNC ||
+ token == Token::ESCAPED_STRICT_RESERVED_WORD ||
token == Token::FUTURE_STRICT_RESERVED_WORD || token == Token::LET ||
token == Token::STATIC || token == Token::YIELD;
}
@@ -857,14 +829,12 @@ class ParserBase {
}
// Checks whether an octal literal was last seen between beg_pos and end_pos.
- // If so, reports an error. Only called for strict mode and template strings.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool is_template, bool* ok) {
+ // Only called for strict mode strings.
+ void CheckStrictOctalLiteral(int beg_pos, int end_pos, bool* ok) {
Scanner::Location octal = scanner()->octal_position();
if (octal.IsValid() && beg_pos <= octal.beg_pos &&
octal.end_pos <= end_pos) {
- MessageTemplate::Template message =
- is_template ? MessageTemplate::kTemplateOctalLiteral
- : scanner()->octal_message();
+ MessageTemplate::Template message = scanner()->octal_message();
DCHECK_NE(message, MessageTemplate::kNone);
impl()->ReportMessageAt(octal, message);
scanner()->clear_octal_position();
@@ -875,12 +845,23 @@ class ParserBase {
}
}
- inline void CheckStrictOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- CheckOctalLiteral(beg_pos, end_pos, false, ok);
- }
+  // Checks if an octal literal or an invalid hex or unicode escape sequence
+  // appears in a template literal. If one is present, either returns false
+  // or reports an error, depending on should_throw. Otherwise returns true.
+ inline bool CheckTemplateEscapes(bool should_throw, bool* ok) {
+ if (!scanner()->has_invalid_template_escape()) {
+ return true;
+ }
- inline void CheckTemplateOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- CheckOctalLiteral(beg_pos, end_pos, true, ok);
+ // Handle error case(s)
+ if (should_throw) {
+ impl()->ReportMessageAt(scanner()->invalid_template_escape_location(),
+ scanner()->invalid_template_escape_message());
+ *ok = false;
+ }
+ scanner()->clear_invalid_template_escape();
+ return false;
}
void CheckDestructuringElement(ExpressionT element, int beg_pos, int end_pos);
@@ -1054,14 +1035,6 @@ class ParserBase {
}
}
- void ExpressionUnexpectedToken() {
- MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
- const char* arg;
- Scanner::Location location = scanner()->peek_location();
- GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
- classifier()->RecordExpressionError(location, message, arg);
- }
-
void BindingPatternUnexpectedToken() {
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
const char* arg;
@@ -1156,7 +1129,8 @@ class ParserBase {
FunctionLiteralT ParseClassFieldForInitializer(bool has_initializer,
bool* ok);
ObjectLiteralPropertyT ParseObjectPropertyDefinition(
- ObjectLiteralChecker* checker, bool* is_computed_name, bool* ok);
+ ObjectLiteralChecker* checker, bool* is_computed_name,
+ bool* is_rest_property, bool* ok);
ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
bool maybe_arrow, bool* ok);
ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
@@ -1175,9 +1149,14 @@ class ParserBase {
ExpressionT ParseMemberExpression(bool* is_async, bool* ok);
ExpressionT ParseMemberExpressionContinuation(ExpressionT expression,
bool* is_async, bool* ok);
+
+ // `rewritable_length`: length of the destructuring_assignments_to_rewrite()
+ // queue in the parent function state, prior to parsing of formal parameters.
+ // If the arrow function is lazy, any items added during formal parameter
+ // parsing are removed from the queue.
ExpressionT ParseArrowFunctionLiteral(bool accept_IN,
const FormalParametersT& parameters,
- bool* ok);
+ int rewritable_length, bool* ok);
void ParseAsyncFunctionBody(Scope* scope, StatementListT body,
FunctionKind kind, FunctionBodyType type,
bool accept_IN, int pos, bool* ok);
@@ -1186,8 +1165,10 @@ class ParserBase {
Scanner::Location class_name_location,
bool name_is_strict_reserved,
int class_token_pos, bool* ok);
- ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool* ok);
+ ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool tagged,
+ bool* ok);
ExpressionT ParseSuperExpression(bool is_new, bool* ok);
+ ExpressionT ParseDynamicImportExpression(bool* ok);
ExpressionT ParseNewTargetExpression(bool* ok);
void ParseFormalParameter(FormalParametersT* parameters, bool* ok);
@@ -1212,6 +1193,12 @@ class ParserBase {
bool default_export, bool* ok);
StatementT ParseNativeDeclaration(bool* ok);
+ // Consumes the ending }.
+ void ParseFunctionBody(StatementListT result, IdentifierT function_name,
+ int pos, const FormalParametersT& parameters,
+ FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok);
+
// Under some circumstances, we allow preparsing to abort if the preparsed
// function is "long and trivial", and fully parse instead. Our current
// definition of "long and trivial" is:
@@ -1232,6 +1219,9 @@ class ParserBase {
LazyParsingResult ParseStatementList(StatementListT body, int end_token,
bool may_abort, bool* ok);
StatementT ParseStatementListItem(bool* ok);
+ StatementT ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok) {
+ return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+ }
StatementT ParseStatement(ZoneList<const AstRawString*>* labels,
AllowLabelledFunctionStatement allow_function,
bool* ok);
@@ -1242,11 +1232,8 @@ class ParserBase {
// Parse a SubStatement in strict mode, or with an extra block scope in
// sloppy mode to handle
// ES#sec-functiondeclarations-in-ifstatement-statement-clauses
- // The legacy parameter indicates whether function declarations are
- // banned by the ES2015 specification in this location, and they are being
- // permitted here to match previous V8 behavior.
StatementT ParseScopedStatement(ZoneList<const AstRawString*>* labels,
- bool legacy, bool* ok);
+ bool* ok);
StatementT ParseVariableStatement(VariableDeclarationContext var_context,
ZoneList<const AstRawString*>* names,
@@ -1278,6 +1265,21 @@ class ParserBase {
bool* ok);
StatementT ParseTryStatement(bool* ok);
StatementT ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+ StatementT ParseForEachStatementWithDeclarations(
+ int stmt_pos, ForInfo* for_info, ZoneList<const AstRawString*>* labels,
+ bool* ok);
+ StatementT ParseForEachStatementWithoutDeclarations(
+ int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
+ ForInfo* for_info, ZoneList<const AstRawString*>* labels, bool* ok);
+
+ // Parse a C-style for loop: 'for (<init>; <cond>; <step>) { ... }'
+ StatementT ParseStandardForLoop(int stmt_pos, StatementT init,
+ bool bound_names_are_lexical,
+ ForInfo* for_info, BlockState* for_state,
+ ZoneList<const AstRawString*>* labels,
+ bool* ok);
+ StatementT ParseForAwaitStatement(ZoneList<const AstRawString*>* labels,
+ bool* ok);
bool IsNextLetKeyword();
bool IsTrivialExpression();
@@ -1307,6 +1309,24 @@ class ParserBase {
return expression->IsObjectLiteral() || expression->IsArrayLiteral();
}
+ // Due to hoisting, the value of a 'var'-declared variable may actually change
+ // even if the code contains only the "initial" assignment, namely when that
+ // assignment occurs inside a loop. For example:
+ //
+ // let i = 10;
+  //   do { var x = i } while (i--);
+ //
+ // As a simple and very conservative approximation of this, we explicitly mark
+ // as maybe-assigned any non-lexical variable whose initializing "declaration"
+ // does not syntactically occur in the function scope. (In the example above,
+ // it occurs in a block scope.)
+ //
+ // Note that non-lexical variables include temporaries, which may also get
+ // assigned inside a loop due to the various rewritings that the parser
+ // performs.
+ //
+ static void MarkLoopVariableAsAssigned(Scope* scope, Variable* var);
+
// Keep track of eval() calls since they disable all local variable
// optimizations. This checks if expression is an eval call, and if yes,
// forwards the information to scope.
@@ -1380,7 +1400,7 @@ class ParserBase {
ModuleDescriptor* module() const {
return scope()->AsModuleScope()->module();
}
- Scope* scope() const { return scope_state_->scope(); }
+ Scope* scope() const { return scope_; }
// Stack of expression classifiers.
// The top of the stack is always pointed to by classifier().
@@ -1423,7 +1443,7 @@ class ParserBase {
// Parser base's protected field members.
- ScopeState* scope_state_; // Scope stack.
+ Scope* scope_; // Scope stack.
FunctionState* function_state_; // Function state stack.
v8::Extension* extension_;
FuncNameInferrer* fni_;
@@ -1454,22 +1474,24 @@ class ParserBase {
bool allow_harmony_restrictive_generators_;
bool allow_harmony_trailing_commas_;
bool allow_harmony_class_fields_;
- bool allow_harmony_object_spread_;
+ bool allow_harmony_object_rest_spread_;
+ bool allow_harmony_dynamic_import_;
+ bool allow_harmony_async_iteration_;
+ bool allow_harmony_template_escapes_;
friend class DiscardableZoneScope;
};
template <typename Impl>
ParserBase<Impl>::FunctionState::FunctionState(
- FunctionState** function_state_stack, ScopeState** scope_stack,
+ FunctionState** function_state_stack, Scope** scope_stack,
DeclarationScope* scope)
- : ScopeState(scope_stack, scope),
+ : BlockState(scope_stack, scope),
next_materialized_literal_index_(0),
expected_property_count_(0),
- generator_object_variable_(nullptr),
- promise_variable_(nullptr),
function_state_stack_(function_state_stack),
outer_function_state_(*function_state_stack),
+ scope_(scope),
destructuring_assignments_to_rewrite_(16, scope->zone()),
tail_call_expressions_(scope->zone()),
return_expr_context_(ReturnExprContext::kInsideValidBlock),
@@ -1643,7 +1665,8 @@ ParserBase<Impl>::ParseIdentifierOrStrictReservedWord(
!IsAsyncFunction(function_kind)) ||
next == Token::ASYNC) {
*is_strict_reserved = false;
- } else if (next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
+ } else if (next == Token::ESCAPED_STRICT_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
next == Token::STATIC ||
(next == Token::YIELD && !IsGeneratorFunction(function_kind))) {
*is_strict_reserved = true;
@@ -1685,8 +1708,6 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
return impl()->EmptyExpression();
}
- int literal_index = function_state_->NextMaterializedLiteralIndex();
-
IdentifierT js_pattern = impl()->GetNextSymbol();
Maybe<RegExp::Flags> flags = scanner()->ScanRegExpFlags();
if (flags.IsNothing()) {
@@ -1697,7 +1718,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
}
int js_flags = flags.FromJust();
Next();
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, pos);
}
template <typename Impl>
@@ -1797,7 +1818,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
}
// Heuristically try to detect immediately called functions before
// seeing the call parentheses.
- if (peek() == Token::FUNCTION) {
+ if (peek() == Token::FUNCTION ||
+ (peek() == Token::ASYNC && PeekAhead() == Token::FUNCTION)) {
function_state_->set_next_function_is_likely_called();
}
ExpressionT expr = ParseExpressionCoverGrammar(true, CHECK_OK);
@@ -1824,7 +1846,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
BindingPatternUnexpectedToken();
- return ParseTemplateLiteral(impl()->NoTemplateTag(), beg_pos, ok);
+ return ParseTemplateLiteral(impl()->NoTemplateTag(), beg_pos, false, ok);
case Token::MOD:
if (allow_natives() || extension_ != NULL) {
@@ -1974,11 +1996,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
}
Expect(Token::RBRACK, CHECK_OK);
- // Update the scope information before the pre-parsing bailout.
- int literal_index = function_state_->NextMaterializedLiteralIndex();
-
- ExpressionT result = factory()->NewArrayLiteral(values, first_spread_index,
- literal_index, pos);
+ ExpressionT result =
+ factory()->NewArrayLiteral(values, first_spread_index, pos);
if (first_spread_index >= 0) {
result = factory()->NewRewritableExpression(result);
impl()->QueueNonPatternForRewriting(result, ok);
@@ -2108,18 +2127,25 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
}
case Token::ELLIPSIS:
- if (allow_harmony_object_spread()) {
- // TODO(gsathya): Implement destructuring/rest
- classifier()->RecordPatternError(scanner()->location(),
- MessageTemplate::kUnexpectedToken);
-
+ if (allow_harmony_object_rest_spread()) {
*name = impl()->EmptyIdentifier();
Consume(Token::ELLIPSIS);
- ExpressionClassifier spread_classifier(this);
expression = ParseAssignmentExpression(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
- impl()->AccumulateFormalParameterContainmentErrors();
*kind = PropertyKind::kSpreadProperty;
+
+ if (expression->IsAssignment()) {
+ classifier()->RecordPatternError(
+ scanner()->location(),
+ MessageTemplate::kInvalidDestructuringTarget);
+ } else {
+ CheckDestructuringElement(expression, pos,
+ scanner()->location().end_pos);
+ }
+
+ if (peek() != Token::RBRACE) {
+ classifier()->RecordPatternError(scanner()->location(),
+ MessageTemplate::kElementAfterRest);
+ }
return expression;
}
@@ -2162,10 +2188,12 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
Token::Value name_token = peek();
+ int function_token_position = scanner()->peek_location().beg_pos;
IdentifierT name = impl()->EmptyIdentifier();
ExpressionT name_expression;
if (name_token == Token::STATIC) {
Consume(Token::STATIC);
+ function_token_position = scanner()->peek_location().beg_pos;
if (peek() == Token::LPAREN) {
kind = PropertyKind::kMethodProperty;
name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
@@ -2242,8 +2270,10 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
ExpressionT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
- kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
- language_mode(), CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ FLAG_harmony_function_tostring ? function_token_position
+ : kNoSourcePosition,
+ FunctionLiteral::kAccessorOrMethod, language_mode(),
+ CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
*property_kind = ClassLiteralProperty::METHOD;
return factory()->NewClassLiteralProperty(name_expression, value,
@@ -2270,8 +2300,10 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
FunctionLiteralT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
- kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
- language_mode(), CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ FLAG_harmony_function_tostring ? function_token_position
+ : kNoSourcePosition,
+ FunctionLiteral::kAccessorOrMethod, language_mode(),
+ CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
if (!*is_computed_name) {
impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
@@ -2299,9 +2331,8 @@ ParserBase<Impl>::ParseClassFieldForInitializer(bool has_initializer,
FunctionKind kind = FunctionKind::kConciseMethod;
DeclarationScope* initializer_scope = NewFunctionScope(kind);
initializer_scope->set_start_position(scanner()->location().end_pos);
- FunctionState initializer_state(&function_state_, &scope_state_,
- initializer_scope);
- DCHECK(scope() == initializer_scope);
+ FunctionState initializer_state(&function_state_, &scope_, initializer_scope);
+ DCHECK_EQ(initializer_scope, scope());
scope()->SetLanguageMode(STRICT);
ExpressionClassifier expression_classifier(this);
ExpressionT value;
@@ -2317,7 +2348,6 @@ ParserBase<Impl>::ParseClassFieldForInitializer(bool has_initializer,
body->Add(factory()->NewReturnStatement(value, kNoSourcePosition), zone());
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
impl()->EmptyIdentifierString(), initializer_scope, body,
- initializer_state.materialized_literal_count(),
initializer_state.expected_property_count(), 0, 0,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression, default_eager_compile_hint_,
@@ -2329,6 +2359,7 @@ template <typename Impl>
typename ParserBase<Impl>::ObjectLiteralPropertyT
ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
bool* is_computed_name,
+ bool* is_rest_property,
bool* ok) {
bool is_get = false;
bool is_set = false;
@@ -2347,12 +2378,13 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
switch (kind) {
case PropertyKind::kSpreadProperty:
- DCHECK(allow_harmony_object_spread());
+ DCHECK(allow_harmony_object_rest_spread());
DCHECK(!is_get && !is_set && !is_generator && !is_async &&
!*is_computed_name);
DCHECK(name_token == Token::ELLIPSIS);
*is_computed_name = true;
+ *is_rest_property = true;
return factory()->NewObjectLiteralProperty(
impl()->GetLiteralTheHole(kNoSourcePosition), name_expression,
@@ -2464,8 +2496,9 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
ExpressionT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
- kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
- language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ FLAG_harmony_function_tostring ? next_beg_pos : kNoSourcePosition,
+ FunctionLiteral::kAccessorOrMethod, language_mode(),
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
return factory()->NewObjectLiteralProperty(
name_expression, value, ObjectLiteralProperty::COMPUTED,
@@ -2493,8 +2526,9 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
FunctionLiteralT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
- kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
- language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ FLAG_harmony_function_tostring ? next_beg_pos : kNoSourcePosition,
+ FunctionLiteral::kAccessorOrMethod, language_mode(),
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
if (!*is_computed_name) {
impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
@@ -2526,7 +2560,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
typename Types::ObjectPropertyList properties =
impl()->NewObjectPropertyList(4);
int number_of_boilerplate_properties = 0;
+
bool has_computed_names = false;
+ bool has_rest_property = false;
ObjectLiteralChecker checker(this);
Expect(Token::LBRACE, CHECK_OK);
@@ -2535,17 +2571,24 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
FuncNameInferrer::State fni_state(fni_);
bool is_computed_name = false;
- ObjectLiteralPropertyT property =
- ParseObjectPropertyDefinition(&checker, &is_computed_name, CHECK_OK);
+ bool is_rest_property = false;
+ ObjectLiteralPropertyT property = ParseObjectPropertyDefinition(
+ &checker, &is_computed_name, &is_rest_property, CHECK_OK);
if (is_computed_name) {
has_computed_names = true;
}
- // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
- if (!has_computed_names && impl()->IsBoilerplateProperty(property)) {
+ if (is_rest_property) {
+ has_rest_property = true;
+ }
+
+ if (impl()->IsBoilerplateProperty(property) && !has_computed_names) {
+ // Count CONSTANT or COMPUTED properties to maintain the enumeration
+ // order.
number_of_boilerplate_properties++;
}
+
properties->Add(property, zone());
if (peek() != Token::RBRACE) {
@@ -2557,13 +2600,18 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
}
Expect(Token::RBRACE, CHECK_OK);
- // Computation of literal_index must happen before pre parse bailout.
- int literal_index = function_state_->NextMaterializedLiteralIndex();
+  // In the pattern rewriter, we rewrite the rest property into a call to a
+  // runtime function, passing all the other properties as arguments to
+  // that runtime function. Here, we make sure that the number of
+  // properties is less than the number of arguments allowed for a
+  // runtime call.
+ if (has_rest_property && properties->length() > Code::kMaxArguments) {
+ this->classifier()->RecordPatternError(Scanner::Location(pos, position()),
+ MessageTemplate::kTooManyArguments);
+ }
- return factory()->NewObjectLiteral(properties,
- literal_index,
- number_of_boilerplate_properties,
- pos);
+ return factory()->NewObjectLiteral(
+ properties, number_of_boilerplate_properties, pos, has_rest_property);
}
template <typename Impl>
@@ -2576,8 +2624,6 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
ExpressionListT result = impl()->NewExpressionList(4);
Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullExpressionList));
bool done = (peek() == Token::RPAREN);
- bool was_unspread = false;
- int unspread_sequences_count = 0;
while (!done) {
int start_pos = peek_position();
bool is_spread = Check(Token::ELLIPSIS);
@@ -2597,15 +2643,6 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
}
result->Add(argument, zone_);
- // unspread_sequences_count is the number of sequences of parameters which
- // are not prefixed with a spread '...' operator.
- if (is_spread) {
- was_unspread = false;
- } else if (!was_unspread) {
- was_unspread = true;
- unspread_sequences_count++;
- }
-
if (result->length() > Code::kMaxArguments) {
ReportMessage(MessageTemplate::kTooManyArguments);
*ok = false;
@@ -2632,12 +2669,6 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
if (maybe_arrow) {
impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
}
- if (spread_arg.IsValid()) {
- // Unspread parameter sequences are translated into array literals in the
- // parser. Ensure that the number of materialized literals matches between
- // the parser and preparser
- impl()->MaterializeUnspreadArgumentsLiterals(unspread_sequences_count);
- }
}
return result;
@@ -2664,6 +2695,8 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
this, classifier()->duplicate_finder());
Scope::Snapshot scope_snapshot(scope());
+ int rewritable_length =
+ function_state_->destructuring_assignments_to_rewrite().length();
bool is_async = peek() == Token::ASYNC &&
!scanner()->HasAnyLineTerminatorAfterNext() &&
@@ -2717,6 +2750,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
this->scope()->PropagateUsageFlagsToScope(scope);
scope_snapshot.Reparent(scope);
+ function_state_->SetDestructuringAssignmentsScope(rewritable_length, scope);
FormalParametersT parameters(scope);
if (!classifier()->is_simple_parameter_list()) {
@@ -2733,7 +2767,8 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
if (duplicate_loc.IsValid()) {
classifier()->RecordDuplicateFormalParameterError(duplicate_loc);
}
- expression = ParseArrowFunctionLiteral(accept_IN, parameters, CHECK_OK);
+ expression = ParseArrowFunctionLiteral(accept_IN, parameters,
+ rewritable_length, CHECK_OK);
impl()->Discard();
classifier()->RecordPatternError(arrow_loc,
MessageTemplate::kUnexpectedToken,
@@ -3178,7 +3213,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
bool is_super_call = result->IsSuperCallReference();
if (spread_pos.IsValid()) {
- result = impl()->SpreadCall(result, args, pos);
+ result = impl()->SpreadCall(result, args, pos, is_possibly_eval);
} else {
result = factory()->NewCall(result, args, pos, is_possibly_eval);
}
@@ -3213,7 +3248,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
impl()->RewriteNonPattern(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
- result = ParseTemplateLiteral(result, position(), CHECK_OK);
+ result = ParseTemplateLiteral(result, position(), true, CHECK_OK);
break;
}
@@ -3256,6 +3291,11 @@ ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
if (peek() == Token::SUPER) {
const bool is_new = true;
result = ParseSuperExpression(is_new, CHECK_OK);
+ } else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT) {
+ impl()->ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kImportCallNotNewExpression);
+ *ok = false;
+ return impl()->EmptyExpression();
} else if (peek() == Token::PERIOD) {
return ParseNewTargetExpression(CHECK_OK);
} else {
@@ -3289,7 +3329,11 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
// MemberExpression ::
// (PrimaryExpression | FunctionLiteral | ClassLiteral)
// ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
-
+ //
+ // CallExpression ::
+ // (SuperCall | ImportCall)
+ // ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
+ //
// The '[' Expression ']' and '.' Identifier parts are parsed by
// ParseMemberExpressionContinuation, and the Arguments part is parsed by the
// caller.
@@ -3327,7 +3371,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
Scanner::Location function_name_location = Scanner::Location::invalid();
FunctionLiteral::FunctionType function_type =
FunctionLiteral::kAnonymousExpression;
- if (peek_any_identifier()) {
+ if (impl()->ParsingDynamicFunctionDeclaration()) {
+ // We don't want dynamic functions to actually declare their name
+ // "anonymous". We just want that name in the toString().
+ Consume(Token::IDENTIFIER);
+ DCHECK(scanner()->UnescapedLiteralMatches("anonymous", 9));
+ } else if (peek_any_identifier()) {
name = ParseIdentifierOrStrictReservedWord(
function_kind, &is_strict_reserved_name, CHECK_OK);
function_name_location = scanner()->location();
@@ -3342,6 +3391,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
} else if (peek() == Token::SUPER) {
const bool is_new = false;
result = ParseSuperExpression(is_new, CHECK_OK);
+ } else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT) {
+ result = ParseDynamicImportExpression(CHECK_OK);
} else {
result = ParsePrimaryExpression(is_async, CHECK_OK);
}
@@ -3351,6 +3402,20 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
}
template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseDynamicImportExpression(bool* ok) {
+ DCHECK(allow_harmony_dynamic_import());
+ Consume(Token::IMPORT);
+ int pos = position();
+ Expect(Token::LPAREN, CHECK_OK);
+ ExpressionT arg = ParseAssignmentExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ ZoneList<ExpressionT>* args = new (zone()) ZoneList<ExpressionT>(1, zone());
+ args->Add(arg, zone());
+ return factory()->NewCallRuntime(Runtime::kDynamicImportCall, args, pos);
+}
+
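
ParseDynamicImportExpression shows the chosen lowering: there is no dedicated AST node here; `import(expr)` parses straight into a one-argument runtime call. A toy sketch of that shape (the struct names are illustrative, not the parser's real types):

    #include <string>
    #include <vector>

    // Sketch of the lowering above: import(<expr>) becomes a runtime call
    // carrying exactly one argument, the module specifier.
    struct Expression { std::string text; };

    struct CallRuntime {
      std::string runtime_function;  // here: "DynamicImportCall"
      std::vector<Expression> args;  // exactly one: the module specifier
      int pos;                       // source position of the `import` token
    };

    CallRuntime LowerDynamicImport(Expression specifier, int pos) {
      return CallRuntime{"DynamicImportCall", {std::move(specifier)}, pos};
    }

    int main() {
      CallRuntime call = LowerDynamicImport({"'./module.js'"}, 0);
      return call.args.size() == 1 ? 0 : 1;
    }
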
+template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
bool is_new, bool* ok) {
Expect(Token::SUPER, CHECK_OK);
@@ -3463,7 +3528,7 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
expression->AsFunctionLiteral()->SetShouldEagerCompile();
}
}
- expression = ParseTemplateLiteral(expression, pos, CHECK_OK);
+ expression = ParseTemplateLiteral(expression, pos, true, CHECK_OK);
break;
}
case Token::ILLEGAL: {
@@ -3604,12 +3669,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
}
parsing_result->descriptor.scope = scope();
- parsing_result->descriptor.hoist_scope = nullptr;
- // The scope of a var/const declared variable anywhere inside a function
- // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). The scope
- // of a let declared variable is the scope of the immediately enclosing
- // block.
int bindings_start = peek_position();
do {
// Parse binding pattern.
@@ -3793,7 +3853,6 @@ ParserBase<Impl>::ParseHoistableDeclaration(
!is_async && !(allow_harmony_restrictive_generators() && is_generator);
return impl()->DeclareFunction(variable_name, function, mode, pos,
- is_generator, is_async,
is_sloppy_block_function, names, ok);
}
@@ -3883,6 +3942,107 @@ ParserBase<Impl>::ParseAsyncFunctionDeclaration(
}
template <typename Impl>
+void ParserBase<Impl>::ParseFunctionBody(
+ typename ParserBase<Impl>::StatementListT result, IdentifierT function_name,
+ int pos, const FormalParametersT& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok) {
+ static const int kFunctionNameAssignmentIndex = 0;
+ if (function_type == FunctionLiteral::kNamedExpression) {
+ DCHECK(!impl()->IsEmptyIdentifier(function_name));
+ // If we have a named function expression, we add a local variable
+ // declaration to the body of the function with the name of the
+ // function and let it refer to the function itself (closure).
+ // Not having parsed the function body, the language mode may still change,
+ // so we reserve a spot and create the actual const assignment later.
+ DCHECK_EQ(kFunctionNameAssignmentIndex, result->length());
+ result->Add(impl()->NullStatement(), zone());
+ }
+
+ DeclarationScope* function_scope = scope()->AsDeclarationScope();
+ DeclarationScope* inner_scope = function_scope;
+ BlockT inner_block = impl()->NullBlock();
+
+ StatementListT body = result;
+ if (!parameters.is_simple) {
+ inner_scope = NewVarblockScope();
+ inner_scope->set_start_position(scanner()->location().beg_pos);
+ inner_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
+ inner_block->set_scope(inner_scope);
+ body = inner_block->statements();
+ }
+
+ {
+ BlockState block_state(&scope_, inner_scope);
+
+ if (IsGeneratorFunction(kind)) {
+ impl()->ParseAndRewriteGeneratorFunctionBody(pos, kind, body, ok);
+ } else if (IsAsyncFunction(kind)) {
+ const bool accept_IN = true;
+ ParseAsyncFunctionBody(inner_scope, body, kind, FunctionBodyType::kNormal,
+ accept_IN, pos, CHECK_OK_VOID);
+ } else {
+ ParseStatementList(body, Token::RBRACE, CHECK_OK_VOID);
+ }
+
+ if (IsDerivedConstructor(kind)) {
+ body->Add(factory()->NewReturnStatement(impl()->ThisExpression(),
+ kNoSourcePosition),
+ zone());
+ }
+ }
+
+ Expect(Token::RBRACE, CHECK_OK_VOID);
+ scope()->set_end_position(scanner()->location().end_pos);
+
+ if (!parameters.is_simple) {
+ DCHECK_NOT_NULL(inner_scope);
+ DCHECK_EQ(function_scope, scope());
+ DCHECK_EQ(function_scope, inner_scope->outer_scope());
+ impl()->SetLanguageMode(function_scope, inner_scope->language_mode());
+ BlockT init_block =
+ impl()->BuildParameterInitializationBlock(parameters, CHECK_OK_VOID);
+
+ if (is_sloppy(inner_scope->language_mode())) {
+ impl()->InsertSloppyBlockFunctionVarBindings(inner_scope);
+ }
+
+ // TODO(littledan): Merge the two rejection blocks into one
+ if (IsAsyncFunction(kind)) {
+ init_block = impl()->BuildRejectPromiseOnException(init_block);
+ }
+
+ inner_scope->set_end_position(scanner()->location().end_pos);
+ if (inner_scope->FinalizeBlockScope() != nullptr) {
+ impl()->CheckConflictingVarDeclarations(inner_scope, CHECK_OK_VOID);
+ impl()->InsertShadowingVarBindingInitializers(inner_block);
+ } else {
+ inner_block->set_scope(nullptr);
+ }
+ inner_scope = nullptr;
+
+ result->Add(init_block, zone());
+ result->Add(inner_block, zone());
+ } else {
+ DCHECK_EQ(inner_scope, function_scope);
+ if (is_sloppy(function_scope->language_mode())) {
+ impl()->InsertSloppyBlockFunctionVarBindings(function_scope);
+ }
+ }
+
+ if (!IsArrowFunction(kind)) {
+ // Declare arguments after parsing the function since lexical 'arguments'
+ // masks the arguments object. Declare arguments before declaring the
+ // function var since the arguments object masks 'function arguments'.
+ function_scope->DeclareArguments(ast_value_factory());
+ }
+
+ impl()->CreateFunctionNameAssignment(function_name, pos, function_type,
+ function_scope, result,
+ kFunctionNameAssignmentIndex);
+ impl()->MarkCollectedTailCallExpressions();
+}
+
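
ParseFunctionBody's branchy middle section boils down to one layout decision: with simple parameters the user statements go directly into the function scope, while non-simple parameters (defaults, destructuring) get a separate initialization block followed by an inner block holding the body in its own var scope. A schematic sketch of the resulting statement list (all names illustrative):

    #include <string>
    #include <vector>

    struct Stmt { std::string what; };
    using StmtList = std::vector<Stmt>;

    StmtList BuildBody(bool params_are_simple) {
      StmtList result;
      if (params_are_simple) {
        result.push_back({"user statements, directly in the function scope"});
      } else {
        // Non-simple parameters get their own initialization block; the
        // user body lives in a separate inner block so parameter
        // expressions cannot see body-level var bindings.
        result.push_back({"init_block: evaluate parameter initializers"});
        result.push_back({"inner_block: user statements, own var scope"});
      }
      return result;
    }

    int main() { return BuildBody(false).size() == 2 ? 0 : 1; }
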
+template <typename Impl>
void ParserBase<Impl>::CheckArityRestrictions(int param_count,
FunctionKind function_kind,
bool has_rest,
@@ -3957,7 +4117,8 @@ bool ParserBase<Impl>::IsTrivialExpression() {
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseArrowFunctionLiteral(
- bool accept_IN, const FormalParametersT& formal_parameters, bool* ok) {
+ bool accept_IN, const FormalParametersT& formal_parameters,
+ int rewritable_length, bool* ok) {
const RuntimeCallStats::CounterId counters[2][2] = {
{&RuntimeCallStats::ParseBackgroundArrowFunctionLiteral,
&RuntimeCallStats::ParseArrowFunctionLiteral},
@@ -3977,7 +4138,6 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
}
StatementListT body = impl()->NullStatementList();
- int materialized_literal_count = -1;
int expected_property_count = -1;
int function_literal_id = GetNextFunctionLiteralId();
@@ -3993,14 +4153,9 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
bool should_be_used_once_hint = false;
bool has_braces = true;
{
- FunctionState function_state(&function_state_, &scope_state_,
+ FunctionState function_state(&function_state_, &scope_,
formal_parameters.scope);
- function_state.SkipMaterializedLiterals(
- formal_parameters.materialized_literals_count);
-
- impl()->ReindexLiterals(formal_parameters);
-
Expect(Token::ARROW, CHECK_OK);
if (peek() == Token::LBRACE) {
@@ -4021,16 +4176,10 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
LazyParsingResult result = impl()->SkipFunction(
kind, formal_parameters.scope, &dummy_num_parameters,
&dummy_function_length, &dummy_has_duplicate_parameters,
- &materialized_literal_count, &expected_property_count, false, true,
- CHECK_OK);
+ &expected_property_count, false, true, CHECK_OK);
formal_parameters.scope->ResetAfterPreparsing(
ast_value_factory_, result == kLazyParsingAborted);
- if (formal_parameters.materialized_literals_count > 0) {
- materialized_literal_count +=
- formal_parameters.materialized_literals_count;
- }
-
if (result == kLazyParsingAborted) {
bookmark.Apply();
// Trigger eager (re-)parsing, just below this block.
@@ -4045,11 +4194,11 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
}
if (!is_lazy_top_level_function) {
Consume(Token::LBRACE);
- body = impl()->ParseEagerFunctionBody(
- impl()->EmptyIdentifier(), kNoSourcePosition, formal_parameters,
- kind, FunctionLiteral::kAnonymousExpression, CHECK_OK);
- materialized_literal_count =
- function_state.materialized_literal_count();
+ body = impl()->NewStatementList(8);
+ impl()->ParseFunctionBody(body, impl()->EmptyIdentifier(),
+ kNoSourcePosition, formal_parameters, kind,
+ FunctionLiteral::kAnonymousExpression,
+ CHECK_OK);
expected_property_count = function_state.expected_property_count();
}
} else {
@@ -4079,7 +4228,6 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
impl()->MarkTailPosition(expression);
}
}
- materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
impl()->MarkCollectedTailCallExpressions();
}
@@ -4101,6 +4249,14 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
}
impl()->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
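+  // If the arrow body was skipped by the preparser, destructuring
+  // assignments recorded on the enclosing function while this arrow's
+  // parameters were parsed are presumably stale; they are dropped below by
+  // rewinding to rewritable_length.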
+ if (is_lazy_top_level_function) {
+ FunctionState* parent_state = function_state.outer();
+ DCHECK_NOT_NULL(parent_state);
+ DCHECK_GE(parent_state->destructuring_assignments_to_rewrite().length(),
+ rewritable_length);
+ parent_state->RewindDestructuringAssignments(rewritable_length);
+ }
+
impl()->RewriteDestructuringAssignments();
}
@@ -4112,8 +4268,8 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
}
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
impl()->EmptyIdentifierString(), formal_parameters.scope, body,
- materialized_literal_count, expected_property_count,
- formal_parameters.num_parameters(), formal_parameters.function_length,
+ expected_property_count, formal_parameters.num_parameters(),
+ formal_parameters.function_length,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression, eager_compile_hint,
formal_parameters.scope->start_position(), has_braces,
@@ -4148,21 +4304,18 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
return impl()->EmptyExpression();
}
- BlockState block_state(zone(), &scope_state_);
+ BlockState block_state(zone(), &scope_);
RaiseLanguageMode(STRICT);
ClassInfo class_info(this);
- impl()->DeclareClassVariable(name, block_state.scope(), &class_info,
- class_token_pos, CHECK_OK);
+ impl()->DeclareClassVariable(name, &class_info, class_token_pos, CHECK_OK);
+ scope()->set_start_position(scanner()->location().end_pos);
if (Check(Token::EXTENDS)) {
- block_state.set_start_position(scanner()->location().end_pos);
ExpressionClassifier extends_classifier(this);
class_info.extends = ParseLeftHandSideExpression(CHECK_OK);
impl()->RewriteNonPattern(CHECK_OK);
impl()->AccumulateFormalParameterContainmentErrors();
- } else {
- block_state.set_start_position(scanner()->location().end_pos);
}
ClassLiteralChecker checker(this);
@@ -4243,7 +4396,12 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
IdentifierT name = impl()->EmptyIdentifier();
FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
- if (peek_any_identifier()) {
+ if (impl()->ParsingDynamicFunctionDeclaration()) {
+ // We don't want dynamic functions to actually declare their name
+ // "anonymous". We just want that name in the toString().
+ Consume(Token::IDENTIFIER);
+ DCHECK(scanner()->UnescapedLiteralMatches("anonymous", 9));
+ } else if (peek_any_identifier()) {
type = FunctionLiteral::kNamedExpression;
name = ParseIdentifierOrStrictReservedWord(FunctionKind::kAsyncFunction,
&is_strict_reserved, CHECK_OK);
@@ -4257,7 +4415,7 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
- ExpressionT tag, int start, bool* ok) {
+ ExpressionT tag, int start, bool tagged, bool* ok) {
// A TemplateLiteral is made up of 0 or more TEMPLATE_SPAN tokens (literal
// text followed by a substitution expression), finalized by a single
// TEMPLATE_TAIL.
@@ -4270,22 +4428,25 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
// TEMPLATE_SPAN, or a TEMPLATE_TAIL.
CHECK(peek() == Token::TEMPLATE_SPAN || peek() == Token::TEMPLATE_TAIL);
+ bool forbid_illegal_escapes = !allow_harmony_template_escapes() || !tagged;
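+  // e.g. with the harmony flag on, tag`\unicode` parses (the cooked value
+  // for that span becomes undefined while the raw string is kept), whereas
+  // the untagged `\unicode` remains a SyntaxError.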
+
// If we reach a TEMPLATE_TAIL first, we are parsing a NoSubstitutionTemplate.
// In this case we may simply consume the token and build a template with a
// single TEMPLATE_SPAN and no expressions.
if (peek() == Token::TEMPLATE_TAIL) {
Consume(Token::TEMPLATE_TAIL);
int pos = position();
- CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
typename Impl::TemplateLiteralState ts = impl()->OpenTemplateLiteral(pos);
- impl()->AddTemplateSpan(&ts, true);
+ bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes, CHECK_OK);
+ impl()->AddTemplateSpan(&ts, is_valid, true);
return impl()->CloseTemplateLiteral(&ts, start, tag);
}
Consume(Token::TEMPLATE_SPAN);
int pos = position();
typename Impl::TemplateLiteralState ts = impl()->OpenTemplateLiteral(pos);
- impl()->AddTemplateSpan(&ts, false);
+ bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes, CHECK_OK);
+ impl()->AddTemplateSpan(&ts, is_valid, false);
Token::Value next;
// If we open with a TEMPLATE_SPAN, we must scan the subsequent expression,
@@ -4293,7 +4454,6 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
// case, representing a TemplateMiddle).
do {
- CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
next = peek();
if (next == Token::EOS) {
impl()->ReportMessageAt(Scanner::Location(start, peek_position()),
@@ -4339,11 +4499,11 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
return impl()->EmptyExpression();
}
- impl()->AddTemplateSpan(&ts, next == Token::TEMPLATE_TAIL);
+ bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes, CHECK_OK);
+ impl()->AddTemplateSpan(&ts, is_valid, next == Token::TEMPLATE_TAIL);
} while (next == Token::TEMPLATE_SPAN);
DCHECK_EQ(next, Token::TEMPLATE_TAIL);
- CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
// Once we've reached a TEMPLATE_TAIL, we can close the TemplateLiteral.
return impl()->CloseTemplateLiteral(&ts, start, tag);
}
@@ -4613,6 +4773,10 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
case Token::WHILE:
return ParseWhileStatement(labels, ok);
case Token::FOR:
+ if (V8_UNLIKELY(allow_harmony_async_iteration() && is_async_function() &&
+ PeekAhead() == Token::AWAIT)) {
+ return ParseForAwaitStatement(labels, ok);
+ }
return ParseForStatement(labels, ok);
case Token::CONTINUE:
case Token::BREAK:
@@ -4695,8 +4859,8 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
// Parse the statements and collect escaping labels.
Expect(Token::LBRACE, CHECK_OK_CUSTOM(NullBlock));
{
- BlockState block_state(zone(), &scope_state_);
- block_state.set_start_position(scanner()->location().beg_pos);
+ BlockState block_state(zone(), &scope_);
+ scope()->set_start_position(scanner()->location().beg_pos);
typename Types::Target target(this, body);
while (peek() != Token::RBRACE) {
@@ -4707,30 +4871,27 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
}
Expect(Token::RBRACE, CHECK_OK_CUSTOM(NullBlock));
- block_state.set_end_position(scanner()->location().end_pos);
- body->set_scope(block_state.FinalizedBlockScope());
+ scope()->set_end_position(scanner()->location().end_pos);
+ body->set_scope(scope()->FinalizeBlockScope());
}
return body;
}
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
- ZoneList<const AstRawString*>* labels, bool legacy, bool* ok) {
- if (is_strict(language_mode()) || peek() != Token::FUNCTION || legacy) {
- return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ if (is_strict(language_mode()) || peek() != Token::FUNCTION) {
+ return ParseStatement(labels, ok);
} else {
- if (legacy) {
- impl()->CountUsage(v8::Isolate::kLegacyFunctionDeclaration);
- }
// Make a block around the statement in case a lexical binding
// is introduced by a FunctionDeclaration.
- BlockState block_state(zone(), &scope_state_);
- block_state.set_start_position(scanner()->location().beg_pos);
+ BlockState block_state(zone(), &scope_);
+ scope()->set_start_position(scanner()->location().beg_pos);
BlockT block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
StatementT body = ParseFunctionDeclaration(CHECK_OK);
block->statements()->Add(body, zone());
- block_state.set_end_position(scanner()->location().end_pos);
- block->set_scope(block_state.FinalizedBlockScope());
+ scope()->set_end_position(scanner()->location().end_pos);
+ block->set_scope(scope()->FinalizeBlockScope());
return block;
}
}
@@ -4798,6 +4959,19 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
ReportUnexpectedToken(Next());
*ok = false;
return impl()->NullStatement();
+ case Token::LET: {
+ Token::Value next_next = PeekAhead();
+ // "let" followed by either "[", "{" or an identifier means a lexical
+ // declaration, which should not appear here.
+ if (next_next != Token::LBRACK && next_next != Token::LBRACE &&
+ next_next != Token::IDENTIFIER) {
+ break;
+ }
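+      // Illustrative errors caught here: `if (c) let x;` and `l: let x;`,
+      // where a lexical declaration appears in a single-statement position.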
+ impl()->ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kUnexpectedLexicalDeclaration);
+ *ok = false;
+ return impl()->NullStatement();
+ }
default:
break;
}
@@ -4812,14 +4986,11 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
CHECK_OK);
Consume(Token::COLON);
// ES#sec-labelled-function-declarations Labelled Function Declarations
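// (In sloppy mode, e.g. `l: function f() {}` is typically accepted as a
// labelled function declaration; in strict mode it is a SyntaxError.)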
- if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
- if (allow_function == kAllowLabelledFunctionStatement) {
- return ParseFunctionDeclaration(ok);
- } else {
- return ParseScopedStatement(labels, true, ok);
- }
+ if (peek() == Token::FUNCTION && is_sloppy(language_mode()) &&
+ allow_function == kAllowLabelledFunctionStatement) {
+ return ParseFunctionDeclaration(ok);
}
- return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+ return ParseStatement(labels, ok);
}
// If we have an extension, we allow a native function declaration.
@@ -4847,10 +5018,10 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
Expect(Token::LPAREN, CHECK_OK);
ExpressionT condition = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- StatementT then_statement = ParseScopedStatement(labels, false, CHECK_OK);
+ StatementT then_statement = ParseScopedStatement(labels, CHECK_OK);
StatementT else_statement = impl()->NullStatement();
if (Check(Token::ELSE)) {
- else_statement = ParseScopedStatement(labels, false, CHECK_OK);
+ else_statement = ParseScopedStatement(labels, CHECK_OK);
} else {
else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
}
@@ -5003,9 +5174,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
Scope* with_scope = NewScope(WITH_SCOPE);
StatementT body = impl()->NullStatement();
{
- BlockState block_state(&scope_state_, with_scope);
+ BlockState block_state(&scope_, with_scope);
with_scope->set_start_position(scanner()->peek_location().beg_pos);
- body = ParseScopedStatement(labels, true, CHECK_OK);
+ body = ParseStatement(labels, CHECK_OK);
with_scope->set_end_position(scanner()->location().end_pos);
}
return factory()->NewWithStatement(with_scope, expr, body, pos);
@@ -5021,7 +5192,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
typename Types::Target target(this, loop);
Expect(Token::DO, CHECK_OK);
- StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+ StatementT body = ParseStatement(nullptr, CHECK_OK);
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
@@ -5051,7 +5222,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
Expect(Token::LPAREN, CHECK_OK);
ExpressionT cond = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+ StatementT body = ParseStatement(nullptr, CHECK_OK);
loop->Initialize(cond, body);
return loop;
@@ -5095,9 +5266,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
auto switch_statement = factory()->NewSwitchStatement(labels, switch_pos);
{
- BlockState cases_block_state(zone(), &scope_state_);
- cases_block_state.set_start_position(switch_pos);
- cases_block_state.SetNonlinear();
+ BlockState cases_block_state(zone(), &scope_);
+ scope()->set_start_position(switch_pos);
+ scope()->SetNonlinear();
typename Types::Target target(this, switch_statement);
bool default_seen = false;
@@ -5130,9 +5301,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
}
Expect(Token::RBRACE, CHECK_OK);
- cases_block_state.set_end_position(scanner()->location().end_pos);
- return impl()->RewriteSwitchStatement(
- tag, switch_statement, cases, cases_block_state.FinalizedBlockScope());
+ scope()->set_end_position(scanner()->location().end_pos);
+ return impl()->RewriteSwitchStatement(tag, switch_statement, cases,
+ scope()->FinalizeBlockScope());
}
}
@@ -5178,16 +5349,15 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
CollectExpressionsInTailPositionToListScope
collect_tail_call_expressions_scope(
function_state_, &catch_info.tail_call_expressions);
- BlockState catch_block_state(&scope_state_, catch_info.scope);
+ BlockState catch_block_state(&scope_, catch_info.scope);
catch_block = factory()->NewBlock(nullptr, 16, false, kNoSourcePosition);
// Create a block scope to hold any lexical declarations created
// as part of destructuring the catch parameter.
{
- BlockState catch_variable_block_state(zone(), &scope_state_);
- catch_variable_block_state.set_start_position(
- scanner()->location().beg_pos);
+ BlockState catch_variable_block_state(zone(), &scope_);
+ scope()->set_start_position(scanner()->location().beg_pos);
typename Types::Target target(this, catch_block);
// This does not simply call ParsePrimaryExpression to avoid
@@ -5212,10 +5382,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
catch_block->statements()->Add(catch_info.inner_block, zone());
impl()->ValidateCatchBlock(catch_info, CHECK_OK);
- catch_variable_block_state.set_end_position(
- scanner()->location().end_pos);
- catch_block->set_scope(
- catch_variable_block_state.FinalizedBlockScope());
+ scope()->set_end_position(scanner()->location().end_pos);
+ catch_block->set_scope(scope()->FinalizeBlockScope());
}
}
@@ -5240,180 +5408,198 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
bool bound_names_are_lexical = false;
// Create an in-between scope for let-bound iteration variables.
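// (e.g. `for (let i = 0; i < 3; i++) fns.push(() => i);` needs a fresh `i`
// per iteration; this scope anchors that desugaring.)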
- BlockState for_state(zone(), &scope_state_);
+ BlockState for_state(zone(), &scope_);
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- for_state.set_start_position(scanner()->location().beg_pos);
- for_state.set_is_hidden();
+ scope()->set_start_position(scanner()->location().beg_pos);
+ scope()->set_is_hidden();
StatementT init = impl()->NullStatement();
- if (peek() != Token::SEMICOLON) {
- // An initializer is present.
- if (peek() == Token::VAR || peek() == Token::CONST ||
- (peek() == Token::LET && IsNextLetKeyword())) {
- // The initializer contains declarations.
- ParseVariableDeclarations(kForStatement, &for_info.parsing_result,
- nullptr, CHECK_OK);
- bound_names_are_lexical =
- IsLexicalVariableMode(for_info.parsing_result.descriptor.mode);
- for_info.position = scanner()->location().beg_pos;
-
- if (CheckInOrOf(&for_info.mode)) {
- // Just one declaration followed by in/of.
- if (for_info.parsing_result.declarations.length() != 1) {
- impl()->ReportMessageAt(
- for_info.parsing_result.bindings_loc,
- MessageTemplate::kForInOfLoopMultiBindings,
- ForEachStatement::VisitModeString(for_info.mode));
- *ok = false;
- return impl()->NullStatement();
- }
- if (for_info.parsing_result.first_initializer_loc.IsValid() &&
- (is_strict(language_mode()) ||
- for_info.mode == ForEachStatement::ITERATE ||
- bound_names_are_lexical ||
- !impl()->IsIdentifier(
- for_info.parsing_result.declarations[0].pattern))) {
- impl()->ReportMessageAt(
- for_info.parsing_result.first_initializer_loc,
- MessageTemplate::kForInOfLoopInitializer,
- ForEachStatement::VisitModeString(for_info.mode));
- *ok = false;
- return impl()->NullStatement();
- }
- BlockT init_block = impl()->RewriteForVarInLegacy(for_info);
+ if (peek() == Token::VAR || peek() == Token::CONST ||
+ (peek() == Token::LET && IsNextLetKeyword())) {
+ // The initializer contains declarations.
+ ParseVariableDeclarations(kForStatement, &for_info.parsing_result, nullptr,
+ CHECK_OK);
+ bound_names_are_lexical =
+ IsLexicalVariableMode(for_info.parsing_result.descriptor.mode);
+ for_info.position = scanner()->location().beg_pos;
+
+ if (CheckInOrOf(&for_info.mode)) {
+ return ParseForEachStatementWithDeclarations(stmt_pos, &for_info, labels,
+ ok);
+ }
+
+  // One or more declarations not followed by in/of.
+ init = impl()->BuildInitializationBlock(
+ &for_info.parsing_result,
+ bound_names_are_lexical ? &for_info.bound_names : nullptr, CHECK_OK);
+ } else if (peek() != Token::SEMICOLON) {
+ // The initializer does not contain declarations.
+ int lhs_beg_pos = peek_position();
+ ExpressionClassifier classifier(this);
+ ExpressionT expression = ParseExpressionCoverGrammar(false, CHECK_OK);
+ int lhs_end_pos = scanner()->location().end_pos;
+
+ bool is_for_each = CheckInOrOf(&for_info.mode);
+ bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
+ expression->IsObjectLiteral());
+
+ if (is_destructuring) {
+ ValidateAssignmentPattern(CHECK_OK);
+ } else {
+ impl()->RewriteNonPattern(CHECK_OK);
+ }
- auto loop =
- factory()->NewForEachStatement(for_info.mode, labels, stmt_pos);
- typename Types::Target target(this, loop);
+ if (is_for_each) {
+ return ParseForEachStatementWithoutDeclarations(stmt_pos, expression,
+ lhs_beg_pos, lhs_end_pos,
+ &for_info, labels, ok);
+ }
+ // Initializer is just an expression.
+ init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
+ }
- int each_keyword_pos = scanner()->location().beg_pos;
+ // Standard 'for' loop, we have parsed the initializer at this point.
+ return ParseStandardForLoop(stmt_pos, init, bound_names_are_lexical,
+ &for_info, &for_state, labels, ok);
+}
- ExpressionT enumerable = impl()->EmptyExpression();
- if (for_info.mode == ForEachStatement::ITERATE) {
- ExpressionClassifier classifier(this);
- enumerable = ParseAssignmentExpression(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
- } else {
- enumerable = ParseExpression(true, CHECK_OK);
- }
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseForEachStatementWithDeclarations(
+ int stmt_pos, ForInfo* for_info, ZoneList<const AstRawString*>* labels,
+ bool* ok) {
+  // Only one declaration may precede in/of.
+ if (for_info->parsing_result.declarations.length() != 1) {
+ impl()->ReportMessageAt(for_info->parsing_result.bindings_loc,
+ MessageTemplate::kForInOfLoopMultiBindings,
+ ForEachStatement::VisitModeString(for_info->mode));
+ *ok = false;
+ return impl()->NullStatement();
+ }
+ if (for_info->parsing_result.first_initializer_loc.IsValid() &&
+ (is_strict(language_mode()) ||
+ for_info->mode == ForEachStatement::ITERATE ||
+ IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
+ !impl()->IsIdentifier(
+ for_info->parsing_result.declarations[0].pattern))) {
+ impl()->ReportMessageAt(for_info->parsing_result.first_initializer_loc,
+ MessageTemplate::kForInOfLoopInitializer,
+ ForEachStatement::VisitModeString(for_info->mode));
+ *ok = false;
+ return impl()->NullStatement();
+ }
- Expect(Token::RPAREN, CHECK_OK);
+ BlockT init_block = impl()->RewriteForVarInLegacy(*for_info);
- StatementT final_loop = impl()->NullStatement();
- {
- ReturnExprScope no_tail_calls(function_state_,
- ReturnExprContext::kInsideForInOfBody);
- BlockState block_state(zone(), &scope_state_);
- block_state.set_start_position(scanner()->location().beg_pos);
-
- StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
-
- BlockT body_block = impl()->NullBlock();
- ExpressionT each_variable = impl()->EmptyExpression();
- impl()->DesugarBindingInForEachStatement(&for_info, &body_block,
- &each_variable, CHECK_OK);
- body_block->statements()->Add(body, zone());
- final_loop = impl()->InitializeForEachStatement(
- loop, each_variable, enumerable, body_block, each_keyword_pos);
-
- block_state.set_end_position(scanner()->location().end_pos);
- body_block->set_scope(block_state.FinalizedBlockScope());
- }
+ auto loop = factory()->NewForEachStatement(for_info->mode, labels, stmt_pos);
+ typename Types::Target target(this, loop);
- init_block =
- impl()->CreateForEachStatementTDZ(init_block, for_info, ok);
+ int each_keyword_pos = scanner()->location().beg_pos;
- for_state.set_end_position(scanner()->location().end_pos);
- Scope* for_scope = for_state.FinalizedBlockScope();
- // Parsed for-in loop w/ variable declarations.
- if (!impl()->IsNullStatement(init_block)) {
- init_block->statements()->Add(final_loop, zone());
- init_block->set_scope(for_scope);
- return init_block;
- } else {
- DCHECK_NULL(for_scope);
- return final_loop;
- }
- } else {
- // One or more declaration not followed by in/of.
- init = impl()->BuildInitializationBlock(
- &for_info.parsing_result,
- bound_names_are_lexical ? &for_info.bound_names : nullptr,
- CHECK_OK);
- }
- } else {
- // The initializer does not contain declarations.
- int lhs_beg_pos = peek_position();
- ExpressionClassifier classifier(this);
- ExpressionT expression = ParseExpressionCoverGrammar(false, CHECK_OK);
- int lhs_end_pos = scanner()->location().end_pos;
+ ExpressionT enumerable = impl()->EmptyExpression();
+ if (for_info->mode == ForEachStatement::ITERATE) {
+ ExpressionClassifier classifier(this);
+ enumerable = ParseAssignmentExpression(true, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ } else {
+ enumerable = ParseExpression(true, CHECK_OK);
+ }
- bool is_for_each = CheckInOrOf(&for_info.mode);
- bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
- expression->IsObjectLiteral());
+ Expect(Token::RPAREN, CHECK_OK);
- if (is_destructuring) {
- ValidateAssignmentPattern(CHECK_OK);
- } else {
- impl()->RewriteNonPattern(CHECK_OK);
- }
+ StatementT final_loop = impl()->NullStatement();
+ {
+ ReturnExprScope no_tail_calls(function_state_,
+ ReturnExprContext::kInsideForInOfBody);
+ BlockState block_state(zone(), &scope_);
+ scope()->set_start_position(scanner()->location().beg_pos);
- if (is_for_each) {
- // Initializer is reference followed by in/of.
- if (!is_destructuring) {
- expression = impl()->CheckAndRewriteReferenceExpression(
- expression, lhs_beg_pos, lhs_end_pos,
- MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
- }
+ StatementT body = ParseStatement(nullptr, CHECK_OK);
- auto loop =
- factory()->NewForEachStatement(for_info.mode, labels, stmt_pos);
- typename Types::Target target(this, loop);
+ BlockT body_block = impl()->NullBlock();
+ ExpressionT each_variable = impl()->EmptyExpression();
+ impl()->DesugarBindingInForEachStatement(for_info, &body_block,
+ &each_variable, CHECK_OK);
+ body_block->statements()->Add(body, zone());
+ final_loop = impl()->InitializeForEachStatement(
+ loop, each_variable, enumerable, body_block, each_keyword_pos);
- int each_keyword_pos = scanner()->location().beg_pos;
+ scope()->set_end_position(scanner()->location().end_pos);
+ body_block->set_scope(scope()->FinalizeBlockScope());
+ }
- ExpressionT enumerable = impl()->EmptyExpression();
- if (for_info.mode == ForEachStatement::ITERATE) {
- ExpressionClassifier classifier(this);
- enumerable = ParseAssignmentExpression(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
- } else {
- enumerable = ParseExpression(true, CHECK_OK);
- }
+ init_block = impl()->CreateForEachStatementTDZ(init_block, *for_info, ok);
- Expect(Token::RPAREN, CHECK_OK);
+ scope()->set_end_position(scanner()->location().end_pos);
+ Scope* for_scope = scope()->FinalizeBlockScope();
+  // Parsed for-in/of loop w/ variable declarations.
+ if (!impl()->IsNullStatement(init_block)) {
+ init_block->statements()->Add(final_loop, zone());
+ init_block->set_scope(for_scope);
+ return init_block;
+ }
- {
- ReturnExprScope no_tail_calls(function_state_,
- ReturnExprContext::kInsideForInOfBody);
- BlockState block_state(zone(), &scope_state_);
- block_state.set_start_position(scanner()->location().beg_pos);
-
- // For legacy compat reasons, give for loops similar treatment to
- // if statements in allowing a function declaration for a body
- StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
- block_state.set_end_position(scanner()->location().end_pos);
- StatementT final_loop = impl()->InitializeForEachStatement(
- loop, expression, enumerable, body, each_keyword_pos);
-
- Scope* for_scope = for_state.FinalizedBlockScope();
- DCHECK_NULL(for_scope);
- USE(for_scope);
- Scope* block_scope = block_state.FinalizedBlockScope();
- DCHECK_NULL(block_scope);
- USE(block_scope);
- return final_loop;
- }
- } else {
- // Initializer is just an expression.
- init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
- }
- }
+ DCHECK_NULL(for_scope);
+ return final_loop;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
+ int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
+ ForInfo* for_info, ZoneList<const AstRawString*>* labels, bool* ok) {
+  // Initializer is a reference followed by in/of.
+ if (!expression->IsArrayLiteral() && !expression->IsObjectLiteral()) {
+ expression = impl()->CheckAndRewriteReferenceExpression(
+ expression, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
+ kSyntaxError, CHECK_OK);
}
- // Standard 'for' loop, we have parsed the initializer at this point.
+ auto loop = factory()->NewForEachStatement(for_info->mode, labels, stmt_pos);
+ typename Types::Target target(this, loop);
+
+ int each_keyword_pos = scanner()->location().beg_pos;
+
+ ExpressionT enumerable = impl()->EmptyExpression();
+ if (for_info->mode == ForEachStatement::ITERATE) {
+ ExpressionClassifier classifier(this);
+ enumerable = ParseAssignmentExpression(true, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ } else {
+ enumerable = ParseExpression(true, CHECK_OK);
+ }
+
+ Expect(Token::RPAREN, CHECK_OK);
+ Scope* for_scope = scope();
+
+ {
+ ReturnExprScope no_tail_calls(function_state_,
+ ReturnExprContext::kInsideForInOfBody);
+ BlockState block_state(zone(), &scope_);
+ scope()->set_start_position(scanner()->location().beg_pos);
+
+ StatementT body = ParseStatement(nullptr, CHECK_OK);
+ scope()->set_end_position(scanner()->location().end_pos);
+ StatementT final_loop = impl()->InitializeForEachStatement(
+ loop, expression, enumerable, body, each_keyword_pos);
+
+ for_scope = for_scope->FinalizeBlockScope();
+ USE(for_scope);
+ DCHECK_NULL(for_scope);
+ Scope* block_scope = scope()->FinalizeBlockScope();
+ USE(block_scope);
+ DCHECK_NULL(block_scope);
+ return final_loop;
+ }
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStandardForLoop(
+ int stmt_pos, StatementT init, bool bound_names_are_lexical,
+ ForInfo* for_info, BlockState* for_state,
+ ZoneList<const AstRawString*>* labels, bool* ok) {
auto loop = factory()->NewForStatement(labels, stmt_pos);
typename Types::Target target(this, loop);
@@ -5426,13 +5612,12 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
// If there are let bindings, then condition and the next statement of the
// for loop must be parsed in a new scope.
Scope* inner_scope = scope();
- // TODO(verwaest): Allocate this through a ScopeState as well.
- if (bound_names_are_lexical && for_info.bound_names.length() > 0) {
+ if (bound_names_are_lexical && for_info->bound_names.length() > 0) {
inner_scope = NewScopeWithParent(inner_scope, BLOCK_SCOPE);
inner_scope->set_start_position(scanner()->location().beg_pos);
}
{
- BlockState block_state(&scope_state_, inner_scope);
+ BlockState block_state(&scope_, inner_scope);
if (peek() != Token::SEMICOLON) {
cond = ParseExpression(true, CHECK_OK);
@@ -5445,53 +5630,201 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
}
Expect(Token::RPAREN, CHECK_OK);
- body = ParseScopedStatement(nullptr, true, CHECK_OK);
+ body = ParseStatement(nullptr, CHECK_OK);
}
- if (bound_names_are_lexical && for_info.bound_names.length() > 0) {
+ if (bound_names_are_lexical && for_info->bound_names.length() > 0) {
auto result = impl()->DesugarLexicalBindingsInForStatement(
- loop, init, cond, next, body, inner_scope, for_info, CHECK_OK);
- for_state.set_end_position(scanner()->location().end_pos);
+ loop, init, cond, next, body, inner_scope, *for_info, CHECK_OK);
+ scope()->set_end_position(scanner()->location().end_pos);
+ inner_scope->set_end_position(scanner()->location().end_pos);
return result;
+ }
+
+ scope()->set_end_position(scanner()->location().end_pos);
+ Scope* for_scope = scope()->FinalizeBlockScope();
+ if (for_scope != nullptr) {
+ // Rewrite a for statement of the form
+ // for (const x = i; c; n) b
+ //
+ // into
+ //
+ // {
+ // const x = i;
+ // for (; c; n) b
+ // }
+ //
+ // or, desugar
+ // for (; c; n) b
+ // into
+ // {
+ // for (; c; n) b
+ // }
+ // just in case b introduces a lexical binding some other way, e.g., if b
+ // is a FunctionDeclaration.
+ BlockT block = factory()->NewBlock(nullptr, 2, false, kNoSourcePosition);
+ if (!impl()->IsNullStatement(init)) {
+ block->statements()->Add(init, zone());
+ }
+ block->statements()->Add(loop, zone());
+ block->set_scope(for_scope);
+ loop->Initialize(init, cond, next, body);
+ return block;
+ }
+
+ loop->Initialize(init, cond, next, body);
+ return loop;
+}
+
+template <typename Impl>
+void ParserBase<Impl>::MarkLoopVariableAsAssigned(Scope* scope, Variable* var) {
+ if (!IsLexicalVariableMode(var->mode()) && !scope->is_function_scope()) {
+ var->set_maybe_assigned();
+ }
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ // for await '(' ForDeclaration of AssignmentExpression ')'
+ DCHECK(is_async_function());
+ DCHECK(allow_harmony_async_iteration());
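+  // Illustrative input being parsed here:
+  //   async function f() { for await (const chunk of stream) use(chunk); }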
+
+ int stmt_pos = peek_position();
+
+ ForInfo for_info(this);
+ for_info.mode = ForEachStatement::ITERATE;
+
+ // Create an in-between scope for let-bound iteration variables.
+ BlockState for_state(zone(), &scope_);
+ Expect(Token::FOR, CHECK_OK);
+ Expect(Token::AWAIT, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ scope()->set_start_position(scanner()->location().beg_pos);
+ scope()->set_is_hidden();
+
+ auto loop = factory()->NewForOfStatement(labels, stmt_pos);
+ typename Types::Target target(this, loop);
+
+ ExpressionT each_variable = impl()->EmptyExpression();
+
+ bool has_declarations = false;
+
+ if (peek() == Token::VAR || peek() == Token::CONST ||
+ (peek() == Token::LET && IsNextLetKeyword())) {
+    // The initializer contains declarations:
+ // 'for' 'await' '(' ForDeclaration 'of' AssignmentExpression ')'
+ // Statement
+ // 'for' 'await' '(' 'var' ForBinding 'of' AssignmentExpression ')'
+ // Statement
+ has_declarations = true;
+ ParseVariableDeclarations(kForStatement, &for_info.parsing_result, nullptr,
+ CHECK_OK);
+ for_info.position = scanner()->location().beg_pos;
+
+ // Only a single declaration is allowed in for-await-of loops
+ if (for_info.parsing_result.declarations.length() != 1) {
+ impl()->ReportMessageAt(for_info.parsing_result.bindings_loc,
+ MessageTemplate::kForInOfLoopMultiBindings,
+ "for-await-of");
+ *ok = false;
+ return impl()->NullStatement();
+ }
+
+ // for-await-of's declarations do not permit initializers.
+ if (for_info.parsing_result.first_initializer_loc.IsValid()) {
+ impl()->ReportMessageAt(for_info.parsing_result.first_initializer_loc,
+ MessageTemplate::kForInOfLoopInitializer,
+ "for-await-of");
+ *ok = false;
+ return impl()->NullStatement();
+ }
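+    // e.g. `for await (var x = 0 of xs) {}` is rejected here, matching
+    // for-of's ban on initializers in the loop head.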
} else {
- for_state.set_end_position(scanner()->location().end_pos);
- Scope* for_scope = for_state.FinalizedBlockScope();
- if (for_scope != nullptr) {
- // Rewrite a for statement of the form
- // for (const x = i; c; n) b
- //
- // into
- //
- // {
- // const x = i;
- // for (; c; n) b
- // }
- //
- // or, desugar
- // for (; c; n) b
- // into
- // {
- // for (; c; n) b
- // }
- // just in case b introduces a lexical binding some other way, e.g., if b
- // is a FunctionDeclaration.
- BlockT block = factory()->NewBlock(nullptr, 2, false, kNoSourcePosition);
- if (!impl()->IsNullStatement(init)) {
- block->statements()->Add(init, zone());
- }
- block->statements()->Add(loop, zone());
- block->set_scope(for_scope);
- loop->Initialize(init, cond, next, body);
- return block;
+ // The initializer does not contain declarations.
+ // 'for' 'await' '(' LeftHandSideExpression 'of' AssignmentExpression ')'
+ // Statement
+ int lhs_beg_pos = peek_position();
+ ExpressionClassifier classifier(this);
+ ExpressionT lhs = each_variable = ParseLeftHandSideExpression(CHECK_OK);
+ int lhs_end_pos = scanner()->location().end_pos;
+
+ if (lhs->IsArrayLiteral() || lhs->IsObjectLiteral()) {
+ ValidateAssignmentPattern(CHECK_OK);
} else {
- loop->Initialize(init, cond, next, body);
- return loop;
+ impl()->RewriteNonPattern(CHECK_OK);
+ each_variable = impl()->CheckAndRewriteReferenceExpression(
+ lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
+ kSyntaxError, CHECK_OK);
}
}
-}
-#undef CHECK_OK
-#undef CHECK_OK_CUSTOM
+ ExpectContextualKeyword(CStrVector("of"), CHECK_OK);
+ int each_keyword_pos = scanner()->location().beg_pos;
+
+ const bool kAllowIn = true;
+ ExpressionT iterable = impl()->EmptyExpression();
+
+ {
+ ExpressionClassifier classifier(this);
+ iterable = ParseAssignmentExpression(kAllowIn, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ }
+
+ Expect(Token::RPAREN, CHECK_OK);
+
+ StatementT final_loop = impl()->NullStatement();
+ Scope* for_scope = scope();
+ {
+ ReturnExprScope no_tail_calls(function_state_,
+ ReturnExprContext::kInsideForInOfBody);
+ BlockState block_state(zone(), &scope_);
+ scope()->set_start_position(scanner()->location().beg_pos);
+
+ StatementT body = ParseStatement(nullptr, CHECK_OK);
+ scope()->set_end_position(scanner()->location().end_pos);
+
+ if (has_declarations) {
+ BlockT body_block = impl()->NullBlock();
+ impl()->DesugarBindingInForEachStatement(&for_info, &body_block,
+ &each_variable, CHECK_OK);
+ body_block->statements()->Add(body, zone());
+ body_block->set_scope(scope()->FinalizeBlockScope());
+
+ const bool finalize = true;
+ final_loop = impl()->InitializeForOfStatement(
+ loop, each_variable, iterable, body_block, finalize,
+ IteratorType::kAsync, each_keyword_pos);
+ } else {
+ const bool finalize = true;
+ final_loop = impl()->InitializeForOfStatement(
+ loop, each_variable, iterable, body, finalize, IteratorType::kAsync,
+ each_keyword_pos);
+
+ for_scope = for_scope->FinalizeBlockScope();
+ DCHECK_NULL(for_scope);
+ USE(for_scope);
+ Scope* block_scope = scope()->FinalizeBlockScope();
+ DCHECK_NULL(block_scope);
+ USE(block_scope);
+ return final_loop;
+ }
+ }
+
+ DCHECK(has_declarations);
+ BlockT init_block =
+ impl()->CreateForEachStatementTDZ(impl()->NullBlock(), for_info, ok);
+
+ for_scope->set_end_position(scanner()->location().end_pos);
+ for_scope = for_scope->FinalizeBlockScope();
+  // Parsed for-await-of loop w/ variable declarations.
+ if (!impl()->IsNullStatement(init_block)) {
+ init_block->statements()->Add(final_loop, zone());
+ init_block->set_scope(for_scope);
+ return init_block;
+ }
+ DCHECK_NULL(for_scope);
+ return final_loop;
+}
template <typename Impl>
void ParserBase<Impl>::ObjectLiteralChecker::CheckDuplicateProto(
@@ -5543,6 +5876,9 @@ void ParserBase<Impl>::ClassLiteralChecker::CheckClassMethodName(
}
}
+#undef CHECK_OK
+#undef CHECK_OK_CUSTOM
+#undef CHECK_OK_VOID
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 685fe1d6ea..cc6b6a260b 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -9,7 +9,6 @@
#include "src/api.h"
#include "src/ast/ast-expression-rewriter.h"
#include "src/ast/ast-function-literal-id-reindexer.h"
-#include "src/ast/ast-literal-reindexer.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/ast.h"
#include "src/bailout-reason.h"
@@ -168,7 +167,6 @@ void Parser::SetCachedData(ParseInfo* info) {
FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
bool call_super, int pos,
int end_pos) {
- int materialized_literal_count = -1;
int expected_property_count = -1;
const int parameter_count = 0;
if (name == nullptr) name = ast_value_factory()->empty_string();
@@ -183,8 +181,7 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
ZoneList<Statement*>* body = NULL;
{
- FunctionState function_state(&function_state_, &scope_state_,
- function_scope);
+ FunctionState function_state(&function_state_, &scope_, function_scope);
body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
if (call_super) {
@@ -208,14 +205,12 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
body->Add(factory()->NewReturnStatement(call, pos), zone());
}
- materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
}
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
- name, function_scope, body, materialized_literal_count,
- expected_property_count, parameter_count, parameter_count,
- FunctionLiteral::kNoDuplicateParameters,
+ name, function_scope, body, expected_property_count, parameter_count,
+ parameter_count, FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression, default_eager_compile_hint(), pos,
true, GetNextFunctionLiteralId());
@@ -389,8 +384,8 @@ Expression* Parser::NewSuperPropertyReference(int pos) {
// this_function[home_object_symbol]
VariableProxy* this_function_proxy =
NewUnresolved(ast_value_factory()->this_function_string(), pos);
- Expression* home_object_symbol_literal =
- factory()->NewSymbolLiteral("home_object_symbol", kNoSourcePosition);
+ Expression* home_object_symbol_literal = factory()->NewSymbolLiteral(
+ AstSymbol::kHomeObjectSymbol, kNoSourcePosition);
Expression* home_object = factory()->NewProperty(
this_function_proxy, home_object_symbol_literal, pos);
return factory()->NewSuperPropertyReference(
@@ -519,7 +514,9 @@ Parser::Parser(ParseInfo* info)
cached_parse_data_(nullptr),
total_preparse_skipped_(0),
temp_zoned_(false),
- log_(nullptr) {
+ log_(nullptr),
+ preparsed_scope_data_(info->preparsed_scope_data()),
+ parameters_end_pos_(info->parameters_end_pos()) {
// Even though we were passed ParseInfo, we should not store it in
// Parser - this makes sure that Isolate is not accidentally accessed via
// ParseInfo during background parsing.
@@ -552,7 +549,10 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
set_allow_harmony_trailing_commas(FLAG_harmony_trailing_commas);
set_allow_harmony_class_fields(FLAG_harmony_class_fields);
- set_allow_harmony_object_spread(FLAG_harmony_object_spread);
+ set_allow_harmony_object_rest_spread(FLAG_harmony_object_rest_spread);
+ set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
+ set_allow_harmony_async_iteration(FLAG_harmony_async_iteration);
+ set_allow_harmony_template_escapes(FLAG_harmony_template_escapes);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -657,7 +657,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
// Note that this function can be called from the main thread or from a
// background thread. We should not access anything Isolate / heap dependent
// via ParseInfo, and also not pass it forward.
- DCHECK_NULL(scope_state_);
+ DCHECK_NULL(scope_);
DCHECK_NULL(target_stack_);
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
@@ -681,7 +681,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
scope->set_start_position(0);
- FunctionState function_state(&function_state_, &scope_state_, scope);
+ FunctionState function_state(&function_state_, &scope_, scope);
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
@@ -747,8 +747,8 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
RewriteDestructuringAssignments();
int parameter_count = parsing_module_ ? 1 : 0;
result = factory()->NewScriptOrEvalFunctionLiteral(
- scope, body, function_state.materialized_literal_count(),
- function_state.expected_property_count(), parameter_count);
+ scope, body, function_state.expected_property_count(),
+ parameter_count);
}
}
@@ -823,7 +823,7 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
const AstRawString* raw_name,
Utf16CharacterStream* source) {
scanner_.Initialize(source);
- DCHECK_NULL(scope_state_);
+ DCHECK_NULL(scope_);
DCHECK_NULL(target_stack_);
DCHECK(ast_value_factory());
@@ -844,9 +844,8 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
Scope* outer = original_scope_;
DeclarationScope* outer_function = outer->GetClosureScope();
DCHECK(outer);
- FunctionState function_state(&function_state_, &scope_state_,
- outer_function);
- BlockState block_state(&scope_state_, outer);
+ FunctionState function_state(&function_state_, &scope_, outer_function);
+ BlockState block_state(&scope_, outer);
DCHECK(is_sloppy(outer->language_mode()) ||
is_strict(info->language_mode()));
FunctionLiteral::FunctionType function_type = ComputeFunctionType(info);
@@ -881,12 +880,14 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
scope->set_start_position(info->start_position());
ExpressionClassifier formals_classifier(this);
ParserFormalParameters formals(scope);
+ int rewritable_length =
+ function_state.destructuring_assignments_to_rewrite().length();
Checkpoint checkpoint(this);
{
// Parsing patterns as variable reference expressions creates
// NewUnresolved references in current scope. Enter arrow function
// scope for formal parameter parsing.
- BlockState block_state(&scope_state_, scope);
+ BlockState block_state(&scope_, scope);
if (Check(Token::LPAREN)) {
// '(' StrictFormalParameters ')'
ParseFormalParameterList(&formals, &ok);
@@ -917,7 +918,8 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
// Pass `accept_IN=true` to ParseArrowFunctionLiteral --- This should
// not be observable, or else the preparser would have failed.
- Expression* expression = ParseArrowFunctionLiteral(true, formals, &ok);
+ Expression* expression =
+ ParseArrowFunctionLiteral(true, formals, rewritable_length, &ok);
if (ok) {
// Scanning must end at the same position that was recorded
// previously. If not, parsing has been interrupted due to a stack
@@ -930,6 +932,10 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
// must produce a FunctionLiteral.
DCHECK(expression->IsFunctionLiteral());
result = expression->AsFunctionLiteral();
+ // Rewrite destructuring assignments in the parameters. (The ones
+ // inside the function body are rewritten by
+ // ParseArrowFunctionLiteral.)
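+      // e.g. a parameter default such as (a = [b] = c) => a contains a
+      // destructuring assignment ([b] = c) that is rewritten here.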
+ RewriteDestructuringAssignments();
} else {
ok = false;
}
@@ -962,15 +968,21 @@ Statement* Parser::ParseModuleItem(bool* ok) {
// ExportDeclaration
// StatementListItem
- switch (peek()) {
- case Token::IMPORT:
- ParseImportDeclaration(CHECK_OK);
- return factory()->NewEmptyStatement(kNoSourcePosition);
- case Token::EXPORT:
- return ParseExportDeclaration(ok);
- default:
- return ParseStatementListItem(ok);
+ Token::Value next = peek();
+
+ if (next == Token::EXPORT) {
+ return ParseExportDeclaration(ok);
+ }
+
+ // We must be careful not to parse a dynamic import expression as an import
+ // declaration.
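+  // e.g. `import("./mod.js")` is a call-like expression when the flag is
+  // on, whereas `import x from "./mod.js"` is an import declaration.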
+ if (next == Token::IMPORT &&
+ (!allow_harmony_dynamic_import() || PeekAhead() != Token::LPAREN)) {
+ ParseImportDeclaration(CHECK_OK);
+ return factory()->NewEmptyStatement(kNoSourcePosition);
}
+
+ return ParseStatementListItem(ok);
}
@@ -1474,12 +1486,12 @@ void Parser::DeclareAndInitializeVariables(
Statement* Parser::DeclareFunction(const AstRawString* variable_name,
FunctionLiteral* function, VariableMode mode,
- int pos, bool is_generator, bool is_async,
- bool is_sloppy_block_function,
+ int pos, bool is_sloppy_block_function,
ZoneList<const AstRawString*>* names,
bool* ok) {
VariableProxy* proxy =
factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE);
+
Declaration* declaration =
factory()->NewFunctionDeclaration(proxy, function, scope(), pos);
Declare(declaration, DeclarationDescriptor::NORMAL, mode, kCreatedInitialized,
@@ -1488,8 +1500,8 @@ Statement* Parser::DeclareFunction(const AstRawString* variable_name,
if (is_sloppy_block_function) {
SloppyBlockFunctionStatement* statement =
factory()->NewSloppyBlockFunctionStatement();
- DeclarationScope* target_scope = GetDeclarationScope();
- target_scope->DeclareSloppyBlockFunction(variable_name, scope(), statement);
+ GetDeclarationScope()->DeclareSloppyBlockFunction(variable_name, scope(),
+ statement);
return statement;
}
return factory()->NewEmptyStatement(kNoSourcePosition);
@@ -1676,7 +1688,6 @@ void Parser::RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
descriptor.scope = scope();
- descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
descriptor.declaration_pos = catch_info->pattern->position();
descriptor.initialization_pos = catch_info->pattern->position();
@@ -1756,10 +1767,80 @@ Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
}
}
-// !%_IsJSReceiver(result = iterator.next()) &&
-// %ThrowIteratorResultNotAnObject(result)
+void Parser::ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
+ ZoneList<Statement*>* body,
+ bool* ok) {
+ // We produce:
+ //
+ // try { InitialYield; ...body...; return {value: undefined, done: true} }
+ // finally { %_GeneratorClose(generator) }
+ //
+ // - InitialYield yields the actual generator object.
+ // - Any return statement inside the body will have its argument wrapped
+ // in a "done" iterator result object.
+ // - If the generator terminates for whatever reason, we must close it.
+ // Hence the finally clause.
+
+ Block* try_block = factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
+ Expression* initial_yield = BuildInitialYield(pos, kind);
+ try_block->statements()->Add(
+ factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
+ zone());
+ ParseStatementList(try_block->statements(), Token::RBRACE, ok);
+ if (!*ok) return;
+
+ Statement* final_return = factory()->NewReturnStatement(
+ BuildIteratorResult(nullptr, true), kNoSourcePosition);
+ try_block->statements()->Add(final_return, zone());
+
+ Block* finally_block =
+ factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+ VariableProxy* call_proxy =
+ factory()->NewVariableProxy(function_state_->generator_object_variable());
+ args->Add(call_proxy, zone());
+ Expression* call = factory()->NewCallRuntime(Runtime::kInlineGeneratorClose,
+ args, kNoSourcePosition);
+ finally_block->statements()->Add(
+ factory()->NewExpressionStatement(call, kNoSourcePosition), zone());
+
+ body->Add(factory()->NewTryFinallyStatement(try_block, finally_block,
+ kNoSourcePosition),
+ zone());
+}
+
+void Parser::CreateFunctionNameAssignment(
+ const AstRawString* function_name, int pos,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, ZoneList<Statement*>* result, int index) {
+ if (function_type == FunctionLiteral::kNamedExpression) {
+ StatementT statement = factory()->NewEmptyStatement(kNoSourcePosition);
+ if (function_scope->LookupLocal(function_name) == nullptr) {
+ // Now that we know the language mode, we can create the const assignment
+ // in the previously reserved spot.
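+      // e.g. in `var g = function f() { return f; };` the name `f` is bound
+      // inside the body, unless a local declaration shadows it.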
+ DCHECK_EQ(function_scope, scope());
+ Variable* fvar = function_scope->DeclareFunctionVar(function_name);
+ VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
+ statement = factory()->NewExpressionStatement(
+ factory()->NewAssignment(Token::INIT, fproxy,
+ factory()->NewThisFunction(pos),
+ kNoSourcePosition),
+ kNoSourcePosition);
+ }
+ result->Set(index, statement);
+ }
+}
+
+// [if (IteratorType == kNormal)]
+// !%_IsJSReceiver(result = iterator.next()) &&
+// %ThrowIteratorResultNotAnObject(result)
+// [else if (IteratorType == kAsync)]
+// !%_IsJSReceiver(result = Await(iterator.next())) &&
+// %ThrowIteratorResultNotAnObject(result)
+// [endif]
Expression* Parser::BuildIteratorNextResult(Expression* iterator,
- Variable* result, int pos) {
+ Variable* result, IteratorType type,
+ int pos) {
Expression* next_literal = factory()->NewStringLiteral(
ast_value_factory()->next_string(), kNoSourcePosition);
Expression* next_property =
@@ -1768,6 +1849,9 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
new (zone()) ZoneList<Expression*>(0, zone());
Expression* next_call =
factory()->NewCall(next_property, next_arguments, pos);
+ if (type == IteratorType::kAsync) {
+ next_call = RewriteAwaitExpression(next_call, pos);
+ }
Expression* result_proxy = factory()->NewVariableProxy(result);
Expression* left =
factory()->NewAssignment(Token::ASSIGN, result_proxy, next_call, pos);
@@ -1802,7 +1886,7 @@ Statement* Parser::InitializeForEachStatement(ForEachStatement* stmt,
if (for_of != NULL) {
const bool finalize = true;
return InitializeForOfStatement(for_of, each, subject, body, finalize,
- each_keyword_pos);
+ IteratorType::kNormal, each_keyword_pos);
} else {
if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
Variable* temp = NewTemporary(ast_value_factory()->empty_string());
@@ -1896,14 +1980,13 @@ void Parser::DesugarBindingInForEachStatement(ForInfo* for_info,
bool is_for_var_of =
for_info->mode == ForEachStatement::ITERATE &&
for_info->parsing_result.descriptor.mode == VariableMode::VAR;
+ bool collect_names =
+ IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
+ is_for_var_of;
PatternRewriter::DeclareAndInitializeVariables(
this, each_initialization_block, &descriptor, &decl,
- (IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
- is_for_var_of)
- ? &for_info->bound_names
- : nullptr,
- CHECK_OK_VOID);
+ collect_names ? &for_info->bound_names : nullptr, CHECK_OK_VOID);
// Annex B.3.5 prohibits the form
// `try {} catch(e) { for (var e of {}); }`
@@ -1956,17 +2039,14 @@ Block* Parser::CreateForEachStatementTDZ(Block* init_block,
return init_block;
}
-Statement* Parser::InitializeForOfStatement(ForOfStatement* for_of,
- Expression* each,
- Expression* iterable,
- Statement* body, bool finalize,
- int next_result_pos) {
+Statement* Parser::InitializeForOfStatement(
+ ForOfStatement* for_of, Expression* each, Expression* iterable,
+ Statement* body, bool finalize, IteratorType type, int next_result_pos) {
// Create the auxiliary expressions needed for iterating over the iterable,
// and initialize the given ForOfStatement with them.
// If finalize is true, also instrument the loop with code that performs the
// proper ES6 iterator finalization. In that case, the result is not
// immediately a ForOfStatement.
-
const int nopos = kNoSourcePosition;
auto avfactory = ast_value_factory();
@@ -1974,22 +2054,27 @@ Statement* Parser::InitializeForOfStatement(ForOfStatement* for_of,
Variable* result = NewTemporary(avfactory->dot_result_string());
Variable* completion = NewTemporary(avfactory->empty_string());
- // iterator = GetIterator(iterable)
+ // iterator = GetIterator(iterable, type)
Expression* assign_iterator;
{
assign_iterator = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(iterator),
- factory()->NewGetIterator(iterable, iterable->position()),
+ factory()->NewGetIterator(iterable, type, iterable->position()),
iterable->position());
}
- // !%_IsJSReceiver(result = iterator.next()) &&
- // %ThrowIteratorResultNotAnObject(result)
+ // [if (IteratorType == kNormal)]
+ // !%_IsJSReceiver(result = iterator.next()) &&
+ // %ThrowIteratorResultNotAnObject(result)
+ // [else if (IteratorType == kAsync)]
+ // !%_IsJSReceiver(result = Await(iterator.next())) &&
+ // %ThrowIteratorResultNotAnObject(result)
+ // [endif]
Expression* next_result;
{
Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
next_result =
- BuildIteratorNextResult(iterator_proxy, result, next_result_pos);
+ BuildIteratorNextResult(iterator_proxy, result, type, next_result_pos);
}
// result.done
@@ -2077,7 +2162,8 @@ Statement* Parser::InitializeForOfStatement(ForOfStatement* for_of,
for_of->Initialize(body, iterator, assign_iterator, next_result, result_done,
assign_each);
- return finalize ? FinalizeForOfStatement(for_of, completion, nopos) : for_of;
+ return finalize ? FinalizeForOfStatement(for_of, completion, type, nopos)
+ : for_of;
}
Statement* Parser::DesugarLexicalBindingsInForStatement(
@@ -2176,7 +2262,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Block* inner_block = factory()->NewBlock(NULL, 3, false, kNoSourcePosition);
{
- BlockState block_state(&scope_state_, inner_scope);
+ BlockState block_state(&scope_, inner_scope);
Block* ignore_completion_block = factory()->NewBlock(
nullptr, for_info.bound_names.length() + 3, true, kNoSourcePosition);
@@ -2312,7 +2398,6 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
inner_block->statements()->Add(ignore_completion_block, zone());
}
- inner_scope->set_end_position(scanner()->location().end_pos);
inner_block->set_scope(inner_scope);
}
@@ -2370,7 +2455,8 @@ void Parser::AddArrowFunctionFormalParameters(
expr = assignment->target();
}
- AddFormalParameter(parameters, expr, initializer, end_pos, is_rest);
+    AddFormalParameter(parameters, expr, initializer, end_pos, is_rest);
}
void Parser::DeclareArrowFunctionFormalParameters(
@@ -2401,19 +2487,6 @@ void Parser::DeclareArrowFunctionFormalParameters(
DCHECK_EQ(parameters->is_simple, parameters->scope->has_simple_parameters());
}
-void Parser::ReindexLiterals(const ParserFormalParameters& parameters) {
- if (function_state_->materialized_literal_count() > 0) {
- AstLiteralReindexer reindexer;
-
- for (auto p : parameters.params) {
- if (p->pattern != nullptr) reindexer.Reindex(p->pattern);
- if (p->initializer != nullptr) reindexer.Reindex(p->initializer);
- }
-
- DCHECK(reindexer.count() <= function_state_->materialized_literal_count());
- }
-}
-
void Parser::PrepareGeneratorVariables() {
// For generators, allocating variables in contexts is currently a win because
// it minimizes the work needed to suspend and resume an activation. The
@@ -2542,7 +2615,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
use_temp_zone && FLAG_lazy_inner_functions && !is_lazy_top_level_function;
ZoneList<Statement*>* body = nullptr;
- int materialized_literal_count = -1;
int expected_property_count = -1;
bool should_be_used_once_hint = false;
int num_parameters = -1;
@@ -2587,11 +2659,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_lazy_top_level_function || is_lazy_inner_function) {
Scanner::BookmarkScope bookmark(scanner());
bookmark.Set();
- LazyParsingResult result =
- SkipFunction(kind, scope, &num_parameters, &function_length,
- &has_duplicate_parameters, &materialized_literal_count,
- &expected_property_count, is_lazy_inner_function,
- is_lazy_top_level_function, CHECK_OK);
+ LazyParsingResult result = SkipFunction(
+ kind, scope, &num_parameters, &function_length,
+ &has_duplicate_parameters, &expected_property_count,
+ is_lazy_inner_function, is_lazy_top_level_function, CHECK_OK);
if (result == kLazyParsingAborted) {
DCHECK(is_lazy_top_level_function);
@@ -2611,10 +2682,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
if (!is_lazy_top_level_function && !is_lazy_inner_function) {
- body = ParseFunction(
- function_name, pos, kind, function_type, scope, &num_parameters,
- &function_length, &has_duplicate_parameters,
- &materialized_literal_count, &expected_property_count, CHECK_OK);
+ body = ParseFunction(function_name, pos, kind, function_type, scope,
+ &num_parameters, &function_length,
+ &has_duplicate_parameters, &expected_property_count,
+ CHECK_OK);
}
DCHECK(use_temp_zone || !is_lazy_top_level_function);
@@ -2622,7 +2693,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// If the preconditions are correct, the function body should never be
// accessed, but do this anyway for better behaviour if they're wrong.
body = nullptr;
- scope->AnalyzePartially(&previous_zone_ast_node_factory);
+ scope->AnalyzePartially(&previous_zone_ast_node_factory,
+ preparsed_scope_data_);
}
DCHECK_IMPLIES(use_temp_zone, temp_zoned_);
@@ -2669,10 +2741,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Note that the FunctionLiteral needs to be created in the main Zone again.
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
- function_name, scope, body, materialized_literal_count,
- expected_property_count, num_parameters, function_length,
- duplicate_parameters, function_type, eager_compile_hint, pos, true,
- function_literal_id);
+ function_name, scope, body, expected_property_count, num_parameters,
+ function_length, duplicate_parameters, function_type, eager_compile_hint,
+ pos, true, function_literal_id);
function_literal->set_function_token_position(function_token_pos);
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
@@ -2687,9 +2758,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Parser::LazyParsingResult Parser::SkipFunction(
FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
int* function_length, bool* has_duplicate_parameters,
- int* materialized_literal_count, int* expected_property_count,
- bool is_inner_function, bool may_abort, bool* ok) {
+ int* expected_property_count, bool is_inner_function, bool may_abort,
+ bool* ok) {
DCHECK_NE(kNoSourcePosition, function_scope->start_position());
+ DCHECK_EQ(kNoSourcePosition, parameters_end_pos_);
if (produce_cached_parse_data()) CHECK(log_);
DCHECK_IMPLIES(IsArrowFunction(kind),
@@ -2714,7 +2786,6 @@ Parser::LazyParsingResult Parser::SkipFunction(
*num_parameters = entry.num_parameters();
*function_length = entry.function_length();
*has_duplicate_parameters = entry.has_duplicate_parameters();
- *materialized_literal_count = entry.literal_count();
*expected_property_count = entry.property_count();
SetLanguageMode(function_scope, entry.language_mode());
if (entry.uses_super_property())
@@ -2740,7 +2811,9 @@ Parser::LazyParsingResult Parser::SkipFunction(
SET_ALLOW(harmony_function_sent);
SET_ALLOW(harmony_trailing_commas);
SET_ALLOW(harmony_class_fields);
- SET_ALLOW(harmony_object_spread);
+ SET_ALLOW(harmony_object_rest_spread);
+ SET_ALLOW(harmony_dynamic_import);
+ SET_ALLOW(harmony_async_iteration);
#undef SET_ALLOW
}
// Aborting inner function preparsing would leave scopes in an inconsistent
@@ -2771,7 +2844,6 @@ Parser::LazyParsingResult Parser::SkipFunction(
*num_parameters = logger->num_parameters();
*function_length = logger->function_length();
*has_duplicate_parameters = logger->has_duplicate_parameters();
- *materialized_literal_count = logger->literals();
*expected_property_count = logger->properties();
SkipFunctionLiterals(logger->num_inner_functions());
if (!is_inner_function && produce_cached_parse_data()) {
@@ -2779,7 +2851,7 @@ Parser::LazyParsingResult Parser::SkipFunction(
log_->LogFunction(
function_scope->start_position(), function_scope->end_position(),
*num_parameters, *function_length, *has_duplicate_parameters,
- *materialized_literal_count, *expected_property_count, language_mode(),
+ *expected_property_count, language_mode(),
function_scope->uses_super_property(), function_scope->calls_eval(),
logger->num_inner_functions());
}
@@ -2855,11 +2927,10 @@ Block* Parser::BuildParameterInitializationBlock(
Block* init_block = factory()->NewBlock(NULL, 1, true, kNoSourcePosition);
int index = 0;
for (auto parameter : parameters.params) {
- if (parameter->is_rest && parameter->pattern->IsVariableProxy()) break;
+ if (parameter->is_nondestructuring_rest()) break;
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
descriptor.scope = scope();
- descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
descriptor.declaration_pos = parameter->pattern->position();
// The position that will be used by the AssignmentExpression
@@ -2894,7 +2965,6 @@ Block* Parser::BuildParameterInitializationBlock(
param_scope->RecordEvalCall();
param_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
param_block->set_scope(param_scope);
- descriptor.hoist_scope = scope();
// Pass the appropriate scope in so that PatternRewriter can appropriately
// rewrite inner initializers of the pattern to param_scope
descriptor.scope = param_scope;
@@ -2903,14 +2973,14 @@ Block* Parser::BuildParameterInitializationBlock(
param_scope);
}
- BlockState block_state(&scope_state_, param_scope);
+ BlockState block_state(&scope_, param_scope);
DeclarationParsingResult::Declaration decl(
parameter->pattern, parameter->initializer_end_position, initial_value);
PatternRewriter::DeclareAndInitializeVariables(
this, param_block, &descriptor, &decl, nullptr, CHECK_OK);
if (param_block != init_block) {
- param_scope = block_state.FinalizedBlockScope();
+ param_scope = param_scope->FinalizeBlockScope();
if (param_scope != nullptr) {
CheckConflictingVarDeclarations(param_scope, CHECK_OK);
}
@@ -2921,7 +2991,7 @@ Block* Parser::BuildParameterInitializationBlock(
return init_block;
}
-Block* Parser::BuildRejectPromiseOnException(Block* inner_block, bool* ok) {
+Block* Parser::BuildRejectPromiseOnException(Block* inner_block) {
// .promise = %AsyncFunctionPromiseCreate();
// try {
// <inner_block>
@@ -2940,7 +3010,7 @@ Block* Parser::BuildRejectPromiseOnException(Block* inner_block, bool* ok) {
Context::ASYNC_FUNCTION_PROMISE_CREATE_INDEX,
new (zone()) ZoneList<Expression*>(0, zone()), kNoSourcePosition);
Assignment* assign_promise = factory()->NewAssignment(
- Token::INIT, factory()->NewVariableProxy(PromiseVariable()),
+ Token::ASSIGN, factory()->NewVariableProxy(PromiseVariable()),
create_promise, kNoSourcePosition);
set_promise =
factory()->NewExpressionStatement(assign_promise, kNoSourcePosition);
@@ -3019,15 +3089,15 @@ Expression* Parser::BuildResolvePromise(Expression* value, int pos) {
}
Expression* Parser::BuildRejectPromise(Expression* value, int pos) {
- // %RejectPromiseNoDebugEvent(.promise, value, true), .promise
- // The NoDebugEvent variant disables the additional debug event for the
- // rejection since a debug event already happened for the exception that got
- // us here.
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ // %promise_internal_reject(.promise, value, false), .promise
+ // Disables the additional debug event for the rejection since a debug event
+ // already happened for the exception that got us here.
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(3, zone());
args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
args->Add(value, zone());
+ args->Add(factory()->NewBooleanLiteral(false, pos), zone());
Expression* call_runtime = factory()->NewCallRuntime(
- Context::REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, args, pos);
+ Context::PROMISE_INTERNAL_REJECT_INDEX, args, pos);
return factory()->NewBinaryOperation(
Token::COMMA, call_runtime,
factory()->NewVariableProxy(PromiseVariable()), pos);
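Taken together with the desugaring comment at the top of BuildRejectPromiseOnException, the wrapping these two helpers produce for an async function body looks roughly like this (a JavaScript-level sketch only; .promise, .catch and the %-prefixed runtime calls are parser-internal names, not user-writable syntax):

// Sketch assembled from the comments above, not literal parser output.
.promise = %AsyncFunctionPromiseCreate();
try {
  <inner_block>
} catch (.catch) {
  // The trailing `false` suppresses the extra debug event; one was
  // already emitted for the exception itself.
  return %promise_internal_reject(.promise, .catch, false), .promise;
}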
@@ -3060,19 +3130,44 @@ ZoneList<Statement*>* Parser::ParseFunction(
const AstRawString* function_name, int pos, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters, int* function_length,
- bool* has_duplicate_parameters, int* materialized_literal_count,
- int* expected_property_count, bool* ok) {
+ bool* has_duplicate_parameters, int* expected_property_count, bool* ok) {
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
- FunctionState function_state(&function_state_, &scope_state_, function_scope);
+ FunctionState function_state(&function_state_, &scope_, function_scope);
DuplicateFinder duplicate_finder;
ExpressionClassifier formals_classifier(this, &duplicate_finder);
if (IsResumableFunction(kind)) PrepareGeneratorVariables();
+ int expected_parameters_end_pos = parameters_end_pos_;
+ if (expected_parameters_end_pos != kNoSourcePosition) {
+ // This is the first function encountered in a CreateDynamicFunction eval.
+ parameters_end_pos_ = kNoSourcePosition;
+ // The function name should have been ignored, giving us the empty string
+ // here.
+ DCHECK_EQ(function_name, ast_value_factory()->empty_string());
+ }
+
ParserFormalParameters formals(function_scope);
ParseFormalParameterList(&formals, CHECK_OK);
+ if (expected_parameters_end_pos != kNoSourcePosition) {
+ // Check for '(' or ')' shenanigans in the parameter string for dynamic
+ // functions.
+ int position = peek_position();
+ if (position < expected_parameters_end_pos) {
+ ReportMessageAt(Scanner::Location(position, position + 1),
+ MessageTemplate::kArgStringTerminatesParametersEarly);
+ *ok = false;
+ return nullptr;
+ } else if (position > expected_parameters_end_pos) {
+ ReportMessageAt(Scanner::Location(expected_parameters_end_pos - 2,
+ expected_parameters_end_pos),
+ MessageTemplate::kUnexpectedEndOfArgString);
+ *ok = false;
+ return nullptr;
+ }
+ }
Expect(Token::RPAREN, CHECK_OK);
int formals_end_position = scanner()->location().end_pos;
*num_parameters = formals.num_parameters();
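To see what these position checks guard against: CreateDynamicFunction assembles its eval source roughly as "(function anonymous(" + args + "\n) {\n" + body + "\n})", so expected_parameters_end_pos marks where the real ')' must appear. A crafted argument string could otherwise break out of the parameter list (illustrative JavaScript; the message templates match the ones reported above):

// A ')' inside the argument string terminates the parameter list early:
new Function("a) { alert(1); } (", "return a");
// -> SyntaxError via kArgStringTerminatesParametersEarly

// An unbalanced '(' makes parsing run past the intended end of the list:
new Function("a = (", "return a");
// -> SyntaxError via kUnexpectedEndOfArgString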
@@ -3083,8 +3178,8 @@ ZoneList<Statement*>* Parser::ParseFunction(
CHECK_OK);
Expect(Token::LBRACE, CHECK_OK);
- ZoneList<Statement*>* body = ParseEagerFunctionBody(
- function_name, pos, formals, kind, function_type, ok);
+ ZoneList<Statement*>* body = new (zone()) ZoneList<Statement*>(8, zone());
+ ParseFunctionBody(body, function_name, pos, formals, kind, function_type, ok);
// Validate parameter names. We can do this only after parsing the function,
// since the function can declare itself strict.
@@ -3099,167 +3194,11 @@ ZoneList<Statement*>* Parser::ParseFunction(
*has_duplicate_parameters =
!classifier()->is_valid_formal_parameter_list_without_duplicates();
- *materialized_literal_count = function_state.materialized_literal_count();
*expected_property_count = function_state.expected_property_count();
return body;
}
-ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
- const AstRawString* function_name, int pos,
- const ParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok) {
- ZoneList<Statement*>* result = new(zone()) ZoneList<Statement*>(8, zone());
-
- static const int kFunctionNameAssignmentIndex = 0;
- if (function_type == FunctionLiteral::kNamedExpression) {
- DCHECK(function_name != NULL);
- // If we have a named function expression, we add a local variable
- // declaration to the body of the function with the name of the
- // function and let it refer to the function itself (closure).
- // Not having parsed the function body, the language mode may still change,
- // so we reserve a spot and create the actual const assignment later.
- DCHECK_EQ(kFunctionNameAssignmentIndex, result->length());
- result->Add(NULL, zone());
- }
-
- ZoneList<Statement*>* body = result;
- DeclarationScope* function_scope = scope()->AsDeclarationScope();
- DeclarationScope* inner_scope = function_scope;
- Block* inner_block = nullptr;
- if (!parameters.is_simple) {
- inner_scope = NewVarblockScope();
- inner_scope->set_start_position(scanner()->location().beg_pos);
- inner_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
- inner_block->set_scope(inner_scope);
- body = inner_block->statements();
- }
-
- {
- BlockState block_state(&scope_state_, inner_scope);
-
- if (IsGeneratorFunction(kind)) {
- // We produce:
- //
- // try { InitialYield; ...body...; return {value: undefined, done: true} }
- // finally { %_GeneratorClose(generator) }
- //
- // - InitialYield yields the actual generator object.
- // - Any return statement inside the body will have its argument wrapped
- // in a "done" iterator result object.
- // - If the generator terminates for whatever reason, we must close it.
- // Hence the finally clause.
-
- Block* try_block =
- factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
- Expression* initial_yield = BuildInitialYield(pos, kind);
- try_block->statements()->Add(
- factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
- zone());
- ParseStatementList(try_block->statements(), Token::RBRACE, CHECK_OK);
-
- Statement* final_return = factory()->NewReturnStatement(
- BuildIteratorResult(nullptr, true), kNoSourcePosition);
- try_block->statements()->Add(final_return, zone());
-
- Block* finally_block =
- factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
- ZoneList<Expression*>* args =
- new (zone()) ZoneList<Expression*>(1, zone());
- VariableProxy* call_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
- args->Add(call_proxy, zone());
- Expression* call = factory()->NewCallRuntime(
- Runtime::kInlineGeneratorClose, args, kNoSourcePosition);
- finally_block->statements()->Add(
- factory()->NewExpressionStatement(call, kNoSourcePosition), zone());
-
- body->Add(factory()->NewTryFinallyStatement(try_block, finally_block,
- kNoSourcePosition),
- zone());
- } else if (IsAsyncFunction(kind)) {
- const bool accept_IN = true;
- ParseAsyncFunctionBody(inner_scope, body, kind, FunctionBodyType::kNormal,
- accept_IN, pos, CHECK_OK);
- } else {
- ParseStatementList(body, Token::RBRACE, CHECK_OK);
- }
-
- if (IsDerivedConstructor(kind)) {
- body->Add(factory()->NewReturnStatement(ThisExpression(kNoSourcePosition),
- kNoSourcePosition),
- zone());
- }
- }
-
- Expect(Token::RBRACE, CHECK_OK);
- scope()->set_end_position(scanner()->location().end_pos);
-
- if (!parameters.is_simple) {
- DCHECK_NOT_NULL(inner_scope);
- DCHECK_EQ(function_scope, scope());
- DCHECK_EQ(function_scope, inner_scope->outer_scope());
- DCHECK_EQ(body, inner_block->statements());
- SetLanguageMode(function_scope, inner_scope->language_mode());
- Block* init_block = BuildParameterInitializationBlock(parameters, CHECK_OK);
-
- if (is_sloppy(inner_scope->language_mode())) {
- InsertSloppyBlockFunctionVarBindings(inner_scope);
- }
-
- // TODO(littledan): Merge the two rejection blocks into one
- if (IsAsyncFunction(kind)) {
- init_block = BuildRejectPromiseOnException(init_block, CHECK_OK);
- }
-
- DCHECK_NOT_NULL(init_block);
-
- inner_scope->set_end_position(scanner()->location().end_pos);
- if (inner_scope->FinalizeBlockScope() != nullptr) {
- CheckConflictingVarDeclarations(inner_scope, CHECK_OK);
- InsertShadowingVarBindingInitializers(inner_block);
- }
- inner_scope = nullptr;
-
- result->Add(init_block, zone());
- result->Add(inner_block, zone());
- } else {
- DCHECK_EQ(inner_scope, function_scope);
- if (is_sloppy(function_scope->language_mode())) {
- InsertSloppyBlockFunctionVarBindings(function_scope);
- }
- }
-
- if (!IsArrowFunction(kind)) {
- // Declare arguments after parsing the function since lexical 'arguments'
- // masks the arguments object. Declare arguments before declaring the
- // function var since the arguments object masks 'function arguments'.
- function_scope->DeclareArguments(ast_value_factory());
- }
-
- if (function_type == FunctionLiteral::kNamedExpression) {
- Statement* statement;
- if (function_scope->LookupLocal(function_name) == nullptr) {
- // Now that we know the language mode, we can create the const assignment
- // in the previously reserved spot.
- DCHECK_EQ(function_scope, scope());
- Variable* fvar = function_scope->DeclareFunctionVar(function_name);
- VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
- statement = factory()->NewExpressionStatement(
- factory()->NewAssignment(Token::INIT, fproxy,
- factory()->NewThisFunction(pos),
- kNoSourcePosition),
- kNoSourcePosition);
- } else {
- statement = factory()->NewEmptyStatement(kNoSourcePosition);
- }
- result->Set(kFunctionNameAssignmentIndex, statement);
- }
-
- MarkCollectedTailCallExpressions();
- return result;
-}
-
-void Parser::DeclareClassVariable(const AstRawString* name, Scope* block_scope,
+void Parser::DeclareClassVariable(const AstRawString* name,
ClassInfo* class_info, int class_token_pos,
bool* ok) {
#ifdef DEBUG
@@ -3269,7 +3208,7 @@ void Parser::DeclareClassVariable(const AstRawString* name, Scope* block_scope,
if (name != nullptr) {
class_info->proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
Declaration* declaration = factory()->NewVariableDeclaration(
- class_info->proxy, block_scope, class_token_pos);
+ class_info->proxy, scope(), class_token_pos);
Declare(declaration, DeclarationDescriptor::NORMAL, CONST,
Variable::DefaultInitializationFlag(CONST), ok);
}
@@ -3379,7 +3318,7 @@ void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
DCHECK(inner_scope->is_declaration_scope());
Scope* function_scope = inner_scope->outer_scope();
DCHECK(function_scope->is_function_scope());
- BlockState block_state(&scope_state_, inner_scope);
+ BlockState block_state(&scope_, inner_scope);
for (Declaration* decl : *inner_scope->declarations()) {
if (decl->proxy()->var()->mode() != VAR || !decl->IsVariableDeclaration()) {
continue;
@@ -3460,21 +3399,18 @@ void Parser::HandleSourceURLComments(Isolate* isolate, Handle<Script> script) {
}
}
-
-void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
- // Internalize strings and values.
- ast_value_factory()->Internalize(isolate);
-
- // Error processing.
- if (error) {
- if (stack_overflow()) {
- isolate->StackOverflow();
- } else {
- DCHECK(pending_error_handler_.has_pending_error());
- pending_error_handler_.ThrowPendingError(isolate, script);
- }
+void Parser::ReportErrors(Isolate* isolate, Handle<Script> script) {
+ if (stack_overflow()) {
+ isolate->StackOverflow();
+ } else {
+ DCHECK(pending_error_handler_.has_pending_error());
+ // Internalize ast values for throwing the pending error.
+ ast_value_factory()->Internalize(isolate);
+ pending_error_handler_.ThrowPendingError(isolate, script);
}
+}
+void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
// Move statistics to Isolate.
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
@@ -3571,15 +3507,20 @@ Parser::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
return new (zone()) TemplateLiteral(zone(), pos);
}
-
-void Parser::AddTemplateSpan(TemplateLiteralState* state, bool tail) {
+void Parser::AddTemplateSpan(TemplateLiteralState* state, bool should_cook,
+ bool tail) {
+ DCHECK(should_cook || allow_harmony_template_escapes());
int pos = scanner()->location().beg_pos;
int end = scanner()->location().end_pos - (tail ? 1 : 2);
- const AstRawString* tv = scanner()->CurrentSymbol(ast_value_factory());
const AstRawString* trv = scanner()->CurrentRawSymbol(ast_value_factory());
- Literal* cooked = factory()->NewStringLiteral(tv, pos);
Literal* raw = factory()->NewStringLiteral(trv, pos);
- (*state)->AddTemplateSpan(cooked, raw, end, zone());
+ if (should_cook) {
+ const AstRawString* tv = scanner()->CurrentSymbol(ast_value_factory());
+ Literal* cooked = factory()->NewStringLiteral(tv, pos);
+ (*state)->AddTemplateSpan(cooked, raw, end, zone());
+ } else {
+ (*state)->AddTemplateSpan(GetLiteralUndefined(pos), raw, end, zone());
+ }
}
@@ -3623,19 +3564,14 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
} else {
uint32_t hash = ComputeTemplateLiteralHash(lit);
- int cooked_idx = function_state_->NextMaterializedLiteralIndex();
- int raw_idx = function_state_->NextMaterializedLiteralIndex();
-
// $getTemplateCallSite
ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(4, zone());
args->Add(factory()->NewArrayLiteral(
- const_cast<ZoneList<Expression*>*>(cooked_strings),
- cooked_idx, pos),
+ const_cast<ZoneList<Expression*>*>(cooked_strings), pos),
+ zone());
+ args->Add(factory()->NewArrayLiteral(
+ const_cast<ZoneList<Expression*>*>(raw_strings), pos),
zone());
- args->Add(
- factory()->NewArrayLiteral(
- const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx, pos),
- zone());
// Truncate hash to Smi-range.
Smi* hash_obj = Smi::cast(Internals::IntToSmi(static_cast<int>(hash)));
@@ -3683,6 +3619,19 @@ uint32_t Parser::ComputeTemplateLiteralHash(const TemplateLiteral* lit) {
return running_hash;
}
+namespace {
+
+bool OnlyLastArgIsSpread(ZoneList<Expression*>* args) {
+ for (int i = 0; i < args->length() - 1; i++) {
+ if (args->at(i)->IsSpread()) {
+ return false;
+ }
+ }
+ return args->at(args->length() - 1)->IsSpread();
+}
+
+} // namespace
+
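OnlyLastArgIsSpread identifies exactly the argument shapes the new CallWithSpread/ConstructWithSpread bytecodes handle, so SpreadCall and SpreadCallNew keep those as ordinary Call/CallNew nodes (unless tail calls are enabled, per the comment below) and only desugar the remaining shapes through the %reflect_* runtime calls. Illustrative call sites:

f(...args);        // single spread in last position: stays a plain Call node
f(a, b, ...rest);  // spread only as the last argument: stays a plain Call node
f(...a, b);        // spread before the last argument: desugared
new C(x, ...xs);   // the same split applies on the construct path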
ZoneList<Expression*>* Parser::PrepareSpreadArguments(
ZoneList<Expression*>* list) {
ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
@@ -3719,9 +3668,7 @@ ZoneList<Expression*>* Parser::PrepareSpreadArguments(
while (i < n && !list->at(i)->IsSpread()) {
unspread->Add(list->at(i++), zone());
}
- int literal_index = function_state_->NextMaterializedLiteralIndex();
- args->Add(factory()->NewArrayLiteral(unspread, literal_index,
- kNoSourcePosition),
+ args->Add(factory()->NewArrayLiteral(unspread, kNoSourcePosition),
zone());
if (i == n) break;
@@ -3746,27 +3693,20 @@ ZoneList<Expression*>* Parser::PrepareSpreadArguments(
}
Expression* Parser::SpreadCall(Expression* function,
- ZoneList<Expression*>* args, int pos) {
+ ZoneList<Expression*>* args, int pos,
+ Call::PossiblyEval is_possibly_eval) {
+ // Handle these cases in BytecodeGenerator.
+ // [Call,New]WithSpread bytecodes aren't used with tailcalls - see
+ // https://crbug.com/v8/5867
+ if (!allow_tailcalls() && OnlyLastArgIsSpread(args)) {
+ return factory()->NewCall(function, args, pos);
+ }
+
if (function->IsSuperCallReference()) {
// Super calls
// $super_constructor = %_GetSuperConstructor(<this-function>)
// %reflect_construct($super_constructor, args, new.target)
- bool only_last_arg_is_spread = false;
- for (int i = 0; i < args->length(); i++) {
- if (args->at(i)->IsSpread()) {
- if (i == args->length() - 1) {
- only_last_arg_is_spread = true;
- }
- break;
- }
- }
-
- if (only_last_arg_is_spread) {
- // Handle in BytecodeGenerator.
- Expression* super_call_ref = NewSuperCallReference(pos);
- return factory()->NewCall(super_call_ref, args, pos);
- }
args = PrepareSpreadArguments(args);
ZoneList<Expression*>* tmp = new (zone()) ZoneList<Expression*>(1, zone());
tmp->Add(function->AsSuperCallReference()->this_function_var(), zone());
@@ -3808,6 +3748,10 @@ Expression* Parser::SpreadCall(Expression* function,
Expression* Parser::SpreadCallNew(Expression* function,
ZoneList<Expression*>* args, int pos) {
+ if (OnlyLastArgIsSpread(args)) {
+ // Handle in BytecodeGenerator.
+ return factory()->NewCallNew(function, args, pos);
+ }
args = PrepareSpreadArguments(args);
args->InsertAt(0, function, zone());
@@ -3881,7 +3825,7 @@ void Parser::RewriteAsyncFunctionBody(ZoneList<Statement*>* body, Block* block,
block->statements()->Add(
factory()->NewReturnStatement(return_value, return_value->position()),
zone());
- block = BuildRejectPromiseOnException(block, CHECK_OK_VOID);
+ block = BuildRejectPromiseOnException(block);
body->Add(block, zone());
}
@@ -4149,12 +4093,11 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
kNoSourcePosition);
}
// for (each of spread) %AppendElement($R, each)
- ForEachStatement* loop = factory()->NewForEachStatement(
- ForEachStatement::ITERATE, nullptr, kNoSourcePosition);
+ ForOfStatement* loop =
+ factory()->NewForOfStatement(nullptr, kNoSourcePosition);
const bool finalize = false;
- InitializeForOfStatement(loop->AsForOfStatement(),
- factory()->NewVariableProxy(each), subject,
- append_body, finalize);
+ InitializeForOfStatement(loop, factory()->NewVariableProxy(each), subject,
+ append_body, finalize, IteratorType::kNormal);
do_block->statements()->Add(loop, zone());
}
}
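RewriteSpreads thus still expands an array literal containing spreads into an element-by-element append loop; only the loop is now built directly as a ForOfStatement over a normal (synchronous) iterator. The semantics being implemented, in plain JavaScript: spread positions iterate, they do not copy.

function* gen() { yield 1; yield 2; }
const arr = [0, ...gen(), 3];  // [0, 1, 2, 3], one %AppendElement per value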
@@ -4235,7 +4178,7 @@ void Parser::SetFunctionName(Expression* value, const AstRawString* name) {
// const kReturn = 1;
// const kThrow = 2;
//
-// let input = function.sent;
+// let input = undefined;
// let mode = kNext;
// let output = undefined;
//
@@ -4346,7 +4289,8 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
Variable* var_iterator = NewTemporary(ast_value_factory()->empty_string());
Statement* get_iterator;
{
- Expression* iterator = factory()->NewGetIterator(iterable, nopos);
+ Expression* iterator =
+ factory()->NewGetIterator(iterable, IteratorType::kNormal, nopos);
Expression* iterator_proxy = factory()->NewVariableProxy(var_iterator);
Expression* assignment = factory()->NewAssignment(
Token::ASSIGN, iterator_proxy, iterator, nopos);
@@ -4428,7 +4372,8 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
Block* then = factory()->NewBlock(nullptr, 4 + 1, false, nopos);
BuildIteratorCloseForCompletion(
scope(), then->statements(), var_iterator,
- factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos));
+ factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos),
+ IteratorType::kNormal);
then->statements()->Add(throw_call, zone());
check_throw = factory()->NewIfStatement(
condition, then, factory()->NewEmptyStatement(nopos), nopos);
@@ -4795,7 +4740,8 @@ void Parser::BuildIteratorClose(ZoneList<Statement*>* statements,
void Parser::FinalizeIteratorUse(Scope* use_scope, Variable* completion,
Expression* condition, Variable* iter,
- Block* iterator_use, Block* target) {
+ Block* iterator_use, Block* target,
+ IteratorType type) {
//
// This function adds two statements to [target], corresponding to the
// following code:
@@ -4851,8 +4797,8 @@ void Parser::FinalizeIteratorUse(Scope* use_scope, Variable* completion,
{
Block* block = factory()->NewBlock(nullptr, 2, true, nopos);
Expression* proxy = factory()->NewVariableProxy(completion);
- BuildIteratorCloseForCompletion(use_scope, block->statements(), iter,
- proxy);
+ BuildIteratorCloseForCompletion(use_scope, block->statements(), iter, proxy,
+ type);
DCHECK(block->statements()->length() == 2);
maybe_close = factory()->NewBlock(nullptr, 1, true, nopos);
@@ -4911,7 +4857,8 @@ void Parser::FinalizeIteratorUse(Scope* use_scope, Variable* completion,
void Parser::BuildIteratorCloseForCompletion(Scope* scope,
ZoneList<Statement*>* statements,
Variable* iterator,
- Expression* completion) {
+ Expression* completion,
+ IteratorType type) {
//
// This function adds two statements to [statements], corresponding to the
// following code:
@@ -4922,9 +4869,17 @@ void Parser::BuildIteratorCloseForCompletion(Scope* scope,
// if (!IS_CALLABLE(iteratorReturn)) {
// throw MakeTypeError(kReturnMethodNotCallable);
// }
- // try { %_Call(iteratorReturn, iterator) } catch (_) { }
+ // [if (IteratorType == kAsync)]
+ // try { Await(%_Call(iteratorReturn, iterator)) } catch (_) { }
+ // [else]
+ // try { %_Call(iteratorReturn, iterator) } catch (_) { }
+ // [endif]
// } else {
- // let output = %_Call(iteratorReturn, iterator);
+ // [if (IteratorType == kAsync)]
+ // let output = Await(%_Call(iteratorReturn, iterator));
+ // [else]
+ // let output = %_Call(iteratorReturn, iterator);
+ // [endif]
// if (!IS_RECEIVER(output)) {
// %ThrowIterResultNotAnObject(output);
// }
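The kAsync variant is what an early exit from a for-await-of loop or an async generator relies on: the return() call made during cleanup is itself awaited. A behavioral sketch (for-await-of sat behind --harmony-async-iteration at this point):

async function takeWhileSmall(asyncIterable) {
  for await (const x of asyncIterable) {
    if (x > 3) break;
    // 'break' runs the cleanup above: roughly await iterator.return(),
    // and the awaited result must be an object unless the loop is
    // already unwinding from a throw.
  }
}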
@@ -4969,6 +4924,10 @@ void Parser::BuildIteratorCloseForCompletion(Scope* scope,
Expression* call =
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+ if (type == IteratorType::kAsync) {
+ call = RewriteAwaitExpression(call, nopos);
+ }
+
Block* try_block = factory()->NewBlock(nullptr, 1, false, nopos);
try_block->statements()->Add(factory()->NewExpressionStatement(call, nopos),
zone());
@@ -4998,6 +4957,9 @@ void Parser::BuildIteratorCloseForCompletion(Scope* scope,
args->Add(factory()->NewVariableProxy(iterator), zone());
Expression* call =
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+ if (type == IteratorType::kAsync) {
+ call = RewriteAwaitExpression(call, nopos);
+ }
Expression* output_proxy = factory()->NewVariableProxy(var_output);
Expression* assignment =
@@ -5068,7 +5030,8 @@ void Parser::BuildIteratorCloseForCompletion(Scope* scope,
}
Statement* Parser::FinalizeForOfStatement(ForOfStatement* loop,
- Variable* var_completion, int pos) {
+ Variable* var_completion,
+ IteratorType type, int pos) {
//
// This function replaces the loop with the following wrapping:
//
@@ -5112,7 +5075,7 @@ Statement* Parser::FinalizeForOfStatement(ForOfStatement* loop,
DCHECK_EQ(scope()->scope_type(), BLOCK_SCOPE);
FinalizeIteratorUse(loop_scope, var_completion, closing_condition,
- loop->iterator(), try_block, final_loop);
+ loop->iterator(), try_block, final_loop, type);
}
return final_loop;
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index a898511b23..e2223d9a77 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -27,6 +27,7 @@ class ParseInfo;
class ScriptData;
class ParserTarget;
class ParserTargetScope;
+class PreParsedScopeData;
class FunctionEntry BASE_EMBEDDED {
public:
@@ -35,7 +36,6 @@ class FunctionEntry BASE_EMBEDDED {
kEndPositionIndex,
kNumParametersIndex,
kFunctionLengthIndex,
- kLiteralCountIndex,
kPropertyCountIndex,
kFlagsIndex,
kNumInnerFunctionsIndex,
@@ -68,7 +68,6 @@ class FunctionEntry BASE_EMBEDDED {
int end_pos() const { return backing_[kEndPositionIndex]; }
int num_parameters() const { return backing_[kNumParametersIndex]; }
int function_length() const { return backing_[kFunctionLengthIndex]; }
- int literal_count() const { return backing_[kLiteralCountIndex]; }
int property_count() const { return backing_[kPropertyCountIndex]; }
LanguageMode language_mode() const {
return LanguageModeField::decode(backing_[kFlagsIndex]);
@@ -157,6 +156,12 @@ struct ParserFormalParameters : FormalParametersBase {
bool is_simple() const {
return pattern->IsVariableProxy() && initializer == nullptr && !is_rest;
}
+
+ bool is_nondestructuring_rest() const {
+ DCHECK_IMPLIES(is_rest, initializer == nullptr);
+ return is_rest && pattern->IsVariableProxy();
+ }
+
Parameter** next() { return &next_parameter; }
Parameter* const* next() const { return &next_parameter; }
};
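is_nondestructuring_rest separates a rest parameter that binds a plain identifier from one that destructures; only the former can be declared directly under its own name (see DeclareFormalParameters below and BuildParameterInitializationBlock in parser.cc). In source terms:

function f(a, ...rest) {}    // rest is a simple binding: declared by name
function g(a, ...[x, y]) {}  // destructuring rest: a temporary is declared,
                             // then the pattern is initialized from it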
@@ -221,16 +226,17 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void DeserializeScopeChain(ParseInfo* info,
MaybeHandle<ScopeInfo> maybe_outer_scope_info);
- // Handle errors detected during parsing, move statistics to Isolate,
- // internalize strings (move them to the heap).
- void Internalize(Isolate* isolate, Handle<Script> script, bool error);
+ // Handle errors detected during parsing.
+ void ReportErrors(Isolate* isolate, Handle<Script> script);
+ // Move statistics to Isolate.
+ void UpdateStatistics(Isolate* isolate, Handle<Script> script);
void HandleSourceURLComments(Isolate* isolate, Handle<Script> script);
private:
friend class ParserBase<Parser>;
friend class v8::internal::ExpressionClassifier<ParserTypes<Parser>>;
- friend bool v8::internal::parsing::ParseProgram(ParseInfo*);
- friend bool v8::internal::parsing::ParseFunction(ParseInfo*);
+ friend bool v8::internal::parsing::ParseProgram(ParseInfo*, bool);
+ friend bool v8::internal::parsing::ParseFunction(ParseInfo*, bool);
bool AllowsLazyParsingWithoutUnresolvedVariables() const {
return scope()->AllowsLazyParsingWithoutUnresolvedVariables(
@@ -340,17 +346,24 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Block* finally_block,
const CatchInfo& catch_info, int pos);
+ void ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
+ ZoneList<Statement*>* body,
+ bool* ok);
+ void CreateFunctionNameAssignment(const AstRawString* function_name, int pos,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope,
+ ZoneList<Statement*>* result, int index);
+
Statement* DeclareFunction(const AstRawString* variable_name,
FunctionLiteral* function, VariableMode mode,
- int pos, bool is_generator, bool is_async,
- bool is_sloppy_block_function,
+ int pos, bool is_sloppy_block_function,
ZoneList<const AstRawString*>* names, bool* ok);
V8_INLINE Statement* DeclareClass(const AstRawString* variable_name,
Expression* value,
ZoneList<const AstRawString*>* names,
int class_token_pos, int end_pos, bool* ok);
V8_INLINE void DeclareClassVariable(const AstRawString* name,
- Scope* block_scope, ClassInfo* class_info,
+ ClassInfo* class_info,
int class_token_pos, bool* ok);
V8_INLINE void DeclareClassProperty(const AstRawString* class_name,
ClassLiteralProperty* property,
@@ -420,6 +433,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
PatternContext SetAssignmentContextIfNeeded(Expression* node);
PatternContext SetInitializerContextIfNeeded(Expression* node);
+ bool DeclaresParameterContainingSloppyEval() const;
void RewriteParameterScopes(Expression* expr);
Variable* CreateTempVar(Expression* value = nullptr);
@@ -446,10 +460,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
};
- // !%_IsJSReceiver(result = iterator.next()) &&
- // %ThrowIteratorResultNotAnObject(result)
+ // [if (IteratorType == kAsync)]
+ // !%_IsJSReceiver(result = Await(iterator.next())) &&
+ // %ThrowIteratorResultNotAnObject(result)
+ // [else]
+ // !%_IsJSReceiver(result = iterator.next()) &&
+ // %ThrowIteratorResultNotAnObject(result)
+ // [endif]
Expression* BuildIteratorNextResult(Expression* iterator, Variable* result,
- int pos);
+ IteratorType type, int pos);
// Initialize the components of a for-in / for-of statement.
Statement* InitializeForEachStatement(ForEachStatement* stmt,
@@ -457,8 +476,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Statement* body, int each_keyword_pos);
Statement* InitializeForOfStatement(ForOfStatement* stmt, Expression* each,
Expression* iterable, Statement* body,
- bool finalize,
+ bool finalize, IteratorType type,
int next_result_pos = kNoSourcePosition);
+
Block* RewriteForVarInLegacy(const ForInfo& for_info);
void DesugarBindingInForEachStatement(ForInfo* for_info, Block** body_block,
Expression** each_variable, bool* ok);
@@ -525,28 +545,24 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// by parsing the function with PreParser. Consumes the ending }.
// If may_abort == true, the (pre-)parser may decide to abort skipping
// in order to force the function to be eagerly parsed, after all.
- LazyParsingResult SkipFunction(
- FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
- int* function_length, bool* has_duplicate_parameters,
- int* materialized_literal_count, int* expected_property_count,
- bool is_inner_function, bool may_abort, bool* ok);
+ LazyParsingResult SkipFunction(FunctionKind kind,
+ DeclarationScope* function_scope,
+ int* num_parameters, int* function_length,
+ bool* has_duplicate_parameters,
+ int* expected_property_count,
+ bool is_inner_function, bool may_abort,
+ bool* ok);
Block* BuildParameterInitializationBlock(
const ParserFormalParameters& parameters, bool* ok);
- Block* BuildRejectPromiseOnException(Block* block, bool* ok);
-
- // Consumes the ending }.
- ZoneList<Statement*>* ParseEagerFunctionBody(
- const AstRawString* function_name, int pos,
- const ParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok);
+ Block* BuildRejectPromiseOnException(Block* block);
ZoneList<Statement*>* ParseFunction(
const AstRawString* function_name, int pos, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters,
int* function_length, bool* has_duplicate_parameters,
- int* materialized_literal_count, int* expected_property_count, bool* ok);
+ int* expected_property_count, bool* ok);
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -582,7 +598,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
typedef TemplateLiteral* TemplateLiteralState;
TemplateLiteralState OpenTemplateLiteral(int pos);
- void AddTemplateSpan(TemplateLiteralState* state, bool tail);
+ // "should_cook" means that the span can be "cooked": in tagged template
+ // literals, both the raw and "cooked" representations are available to user
+ // code ("cooked" meaning that escape sequences are converted to their
+ // interpreted values). With the --harmony-template-escapes flag, invalid
+ // escape sequences cause the cooked span to be represented by undefined,
+ // instead of being a syntax error.
+ // "tail" indicates that this span is the last in the literal.
+ void AddTemplateSpan(TemplateLiteralState* state, bool should_cook,
+ bool tail);
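Concretely, with --harmony-template-escapes an invalid escape sequence only voids the cooked value of the affected span; the raw string still reaches the tag (illustrative JavaScript):

function tag(strings) {
  return [strings[0], strings.raw[0]];
}
tag`\unicode`;  // [undefined, "\\unicode"]: raw survives, cooked does not
`\unicode`;     // untagged templates must cook: still a SyntaxError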
void AddTemplateExpression(TemplateLiteralState* state,
Expression* expression);
Expression* CloseTemplateLiteral(TemplateLiteralState* state, int start,
@@ -591,7 +615,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ZoneList<Expression*>* PrepareSpreadArguments(ZoneList<Expression*>* list);
Expression* SpreadCall(Expression* function, ZoneList<Expression*>* args,
- int pos);
+ int pos, Call::PossiblyEval is_possibly_eval);
Expression* SpreadCallNew(Expression* function, ZoneList<Expression*>* args,
int pos);
Expression* RewriteSuperCall(Expression* call_expression);
@@ -636,16 +660,18 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void FinalizeIteratorUse(Scope* use_scope, Variable* completion,
Expression* condition, Variable* iter,
- Block* iterator_use, Block* result);
+ Block* iterator_use, Block* result,
+ IteratorType type);
Statement* FinalizeForOfStatement(ForOfStatement* loop, Variable* completion,
- int pos);
+ IteratorType type, int pos);
void BuildIteratorClose(ZoneList<Statement*>* statements, Variable* iterator,
Variable* input, Variable* output);
void BuildIteratorCloseForCompletion(Scope* scope,
ZoneList<Statement*>* statements,
Variable* iterator,
- Expression* completion);
+ Expression* completion,
+ IteratorType type);
Statement* CheckCallable(Variable* var, Expression* error, int pos);
V8_INLINE Expression* RewriteAwaitExpression(Expression* value, int pos);
@@ -1025,8 +1051,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
auto* init_block = BuildParameterInitializationBlock(parameters, ok);
if (!*ok) return;
if (is_async) {
- init_block = BuildRejectPromiseOnException(init_block, ok);
- if (!*ok) return;
+ init_block = BuildRejectPromiseOnException(init_block);
}
if (init_block != nullptr) body->Add(init_block, zone());
}
@@ -1037,8 +1062,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int initializer_end_position,
bool is_rest) {
parameters->UpdateArityAndFunctionLength(initializer != nullptr, is_rest);
- bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
- const AstRawString* name = is_simple
+ bool has_simple_name = pattern->IsVariableProxy() && initializer == nullptr;
+ const AstRawString* name = has_simple_name
? pattern->AsVariableProxy()->raw_name()
: ast_value_factory()->empty_string();
auto parameter =
@@ -1051,17 +1076,16 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE void DeclareFormalParameters(
DeclarationScope* scope,
const ThreadedList<ParserFormalParameters::Parameter>& parameters) {
+ bool is_simple = classifier()->is_simple_parameter_list();
+ if (!is_simple) scope->SetHasNonSimpleParameters();
for (auto parameter : parameters) {
bool is_duplicate = false;
- bool is_simple = classifier()->is_simple_parameter_list();
- auto name = is_simple || parameter->is_rest
- ? parameter->name
- : ast_value_factory()->empty_string();
- auto mode = is_simple || parameter->is_rest ? VAR : TEMPORARY;
- if (!is_simple) scope->SetHasNonSimpleParameters();
+ bool use_name = is_simple || parameter->is_nondestructuring_rest();
bool is_optional = parameter->initializer != nullptr;
- scope->DeclareParameter(name, mode, is_optional, parameter->is_rest,
- &is_duplicate, ast_value_factory());
+ scope->DeclareParameter(
+ use_name ? parameter->name : ast_value_factory()->empty_string(),
+ use_name ? VAR : TEMPORARY, is_optional, parameter->is_rest,
+ &is_duplicate, ast_value_factory());
if (is_duplicate &&
classifier()->is_valid_formal_parameter_list_without_duplicates()) {
classifier()->RecordDuplicateFormalParameterError(
@@ -1076,15 +1100,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Scanner::Location* duplicate_loc,
bool* ok);
- void ReindexLiterals(const ParserFormalParameters& parameters);
-
V8_INLINE Expression* NoTemplateTag() { return NULL; }
V8_INLINE static bool IsTaggedTemplate(const Expression* tag) {
return tag != NULL;
}
- V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {}
-
Expression* ExpressionListToExpression(ZoneList<Expression*>* args);
void AddAccessorPrefixToFunctionName(bool is_get, FunctionLiteral* function,
@@ -1109,6 +1129,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
++use_counts_[feature];
}
+ // Returns true iff we're parsing the first function literal during
+ // CreateDynamicFunction().
+ V8_INLINE bool ParsingDynamicFunctionDeclaration() const {
+ return parameters_end_pos_ != kNoSourcePosition;
+ }
+
// Parser's private field members.
friend class DiscardableZoneScope; // Uses reusable_preparser_.
// FIXME(marja): Make reusable_preparser_ always use its own temp Zone (call
@@ -1135,6 +1161,14 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
bool allow_lazy_;
bool temp_zoned_;
ParserLogger* log_;
+
+ PreParsedScopeData* preparsed_scope_data_;
+
+ // If not kNoSourcePosition, indicates that the first function literal
+ // encountered is a dynamic function, see CreateDynamicFunction(). This field
+ // holds the correct position of the ')' that closes the parameter list.
+ // After that ')' is encountered, this field is reset to kNoSourcePosition.
+ int parameters_end_pos_;
};
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index db07bde7c7..ede13ac995 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
namespace parsing {
-bool ParseProgram(ParseInfo* info) {
+bool ParseProgram(ParseInfo* info, bool internalize) {
DCHECK(info->is_toplevel());
DCHECK_NULL(info->literal());
@@ -29,14 +29,19 @@ bool ParseProgram(ParseInfo* info) {
parser.SetCachedData(info);
result = parser.ParseProgram(isolate, info);
info->set_literal(result);
- parser.Internalize(isolate, info->script(), result == nullptr);
- if (result != nullptr) {
+ if (result == nullptr) {
+ parser.ReportErrors(isolate, info->script());
+ } else {
info->set_language_mode(info->literal()->language_mode());
}
+ parser.UpdateStatistics(isolate, info->script());
+ if (internalize) {
+ info->ast_value_factory()->Internalize(isolate);
+ }
return (result != nullptr);
}
-bool ParseFunction(ParseInfo* info) {
+bool ParseFunction(ParseInfo* info, bool internalize) {
DCHECK(!info->is_toplevel());
DCHECK_NULL(info->literal());
@@ -49,12 +54,19 @@ bool ParseFunction(ParseInfo* info) {
result = parser.ParseFunction(isolate, info);
info->set_literal(result);
- parser.Internalize(isolate, info->script(), result == nullptr);
+ if (result == nullptr) {
+ parser.ReportErrors(isolate, info->script());
+ }
+ parser.UpdateStatistics(isolate, info->script());
+ if (internalize) {
+ info->ast_value_factory()->Internalize(isolate);
+ }
return (result != nullptr);
}
-bool ParseAny(ParseInfo* info) {
- return info->is_toplevel() ? ParseProgram(info) : ParseFunction(info);
+bool ParseAny(ParseInfo* info, bool internalize) {
+ return info->is_toplevel() ? ParseProgram(info, internalize)
+ : ParseFunction(info, internalize);
}
} // namespace parsing
diff --git a/deps/v8/src/parsing/parsing.h b/deps/v8/src/parsing/parsing.h
index 1f92c51838..3902377e0d 100644
--- a/deps/v8/src/parsing/parsing.h
+++ b/deps/v8/src/parsing/parsing.h
@@ -16,16 +16,18 @@ namespace parsing {
// Parses the top-level source code represented by the parse info and sets its
// function literal. Returns false (and deallocates any allocated AST
-// nodes) if parsing failed.
-V8_EXPORT_PRIVATE bool ParseProgram(ParseInfo* info);
+// nodes) if parsing failed. Internalizes AST nodes on the heap if
+// |internalize|.
+V8_EXPORT_PRIVATE bool ParseProgram(ParseInfo* info, bool internalize = true);
-// Like ParseProgram but for an individual function.
-V8_EXPORT_PRIVATE bool ParseFunction(ParseInfo* info);
+// Like ParseProgram but for an individual function. Internalizes AST nodes on
+// the heap if |internalize|.
+V8_EXPORT_PRIVATE bool ParseFunction(ParseInfo* info, bool internalize = true);
// If you don't know whether info->is_toplevel() is true or not, use this method
// to dispatch to either of the above functions. Prefer to use the above methods
-// whenever possible.
-V8_EXPORT_PRIVATE bool ParseAny(ParseInfo* info);
+// whenever possible. Internalizes AST nodes on the heap if |internalize|.
+V8_EXPORT_PRIVATE bool ParseAny(ParseInfo* info, bool internalize = true);
} // namespace parsing
} // namespace internal
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index 9eb3f0665b..b4312a2a30 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -137,22 +137,31 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
factory()->NewVariableProxy(name, NORMAL_VARIABLE, pattern->position());
Declaration* declaration = factory()->NewVariableDeclaration(
proxy, descriptor_->scope, descriptor_->declaration_pos);
+
+ // When an extra declaration scope needs to be inserted to account for
+ // a sloppy eval in a default parameter or function body, the parameter
+ // needs to be declared in the function's scope, not in the varblock
+ // scope which will be used for the initializer expression.
+ Scope* outer_function_scope = nullptr;
+ if (DeclaresParameterContainingSloppyEval()) {
+ outer_function_scope = descriptor_->scope->outer_scope();
+ }
Variable* var = parser_->Declare(
declaration, descriptor_->declaration_kind, descriptor_->mode,
Variable::DefaultInitializationFlag(descriptor_->mode), ok_,
- descriptor_->hoist_scope);
+ outer_function_scope);
if (!*ok_) return;
DCHECK_NOT_NULL(var);
DCHECK(proxy->is_resolved());
DCHECK(initializer_position_ != kNoSourcePosition);
var->set_initializer_position(initializer_position_);
- // TODO(adamk): This should probably be checking hoist_scope.
- // Move it to Parser::Declare() to make it easier to test
- // the right scope.
- Scope* declaration_scope = IsLexicalVariableMode(descriptor_->mode)
- ? descriptor_->scope
- : descriptor_->scope->GetDeclarationScope();
+ Scope* declaration_scope =
+ outer_function_scope != nullptr
+ ? outer_function_scope
+ : (IsLexicalVariableMode(descriptor_->mode)
+ ? descriptor_->scope
+ : descriptor_->scope->GetDeclarationScope());
if (declaration_scope->num_var() > kMaxNumFunctionLocals) {
parser_->ReportMessage(MessageTemplate::kTooManyVariables);
*ok_ = false;
@@ -165,6 +174,9 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// If there's no initializer, we're done.
if (value == nullptr) return;
+ Scope* var_init_scope = descriptor_->scope;
+ MarkLoopVariableAsAssigned(var_init_scope, proxy->var());
+
// A declaration of the form:
//
// var v = x;
@@ -177,7 +189,6 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// 'v' than the 'v' in the declaration (e.g., if we are inside a
// 'with' statement or 'catch' block). Global var declarations
// also need special treatment.
- Scope* var_init_scope = descriptor_->scope;
if (descriptor_->mode == VAR && var_init_scope->is_script_scope()) {
// Global variable declarations must be compiled in a specific
@@ -220,18 +231,9 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// But for var declarations we need to do a new lookup.
if (descriptor_->mode == VAR) {
proxy = var_init_scope->NewUnresolved(factory(), name);
- // TODO(neis): Set is_assigned on proxy.
} else {
DCHECK_NOT_NULL(proxy);
DCHECK_NOT_NULL(proxy->var());
- if (var_init_scope->is_script_scope() ||
- var_init_scope->is_module_scope()) {
- // We have to pessimistically assume that top-level variables will be
- // assigned. This is because there may be lazily parsed top-level
- // functions, which, for efficiency, we preparse without variable
- // tracking.
- proxy->set_is_assigned();
- }
}
// Add break location for destructured sub-pattern.
int pos = IsSubPattern() ? pattern->position() : value->position();
@@ -320,39 +322,98 @@ void Parser::PatternRewriter::VisitRewritableExpression(
set_context(old_context);
}
+bool Parser::PatternRewriter::DeclaresParameterContainingSloppyEval() const {
+ // Need to check for a binding context to make sure we have a descriptor.
+ if (IsBindingContext() &&
+ // Only relevant for parameters.
+ descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
+ // And only when scope is a block scope;
+ // without eval, it is a function scope.
+ scope()->is_block_scope()) {
+ DCHECK(scope()->calls_sloppy_eval());
+ DCHECK(scope()->is_declaration_scope());
+ DCHECK(scope()->outer_scope()->is_function_scope());
+ return true;
+ }
+
+ return false;
+}
+
// When an extra declaration scope needs to be inserted to account for
// a sloppy eval in a default parameter or function body, the expressions
// need to be in that new inner scope which was added after initial
// parsing.
void Parser::PatternRewriter::RewriteParameterScopes(Expression* expr) {
- if (!IsBindingContext()) return;
- if (descriptor_->declaration_kind != DeclarationDescriptor::PARAMETER) return;
- if (!scope()->is_block_scope()) return;
-
- DCHECK(scope()->is_declaration_scope());
- DCHECK(scope()->outer_scope()->is_function_scope());
- DCHECK(scope()->calls_sloppy_eval());
-
- ReparentParameterExpressionScope(parser_->stack_limit(), expr, scope());
+ if (DeclaresParameterContainingSloppyEval()) {
+ ReparentParameterExpressionScope(parser_->stack_limit(), expr, scope());
+ }
}
void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
Variable** temp_var) {
auto temp = *temp_var = CreateTempVar(current_value_);
+ ZoneList<Expression*>* rest_runtime_callargs = nullptr;
+ if (pattern->has_rest_property()) {
+ // non_rest_properties_count = pattern->properties()->length - 1;
+ // args_length = 1 + non_rest_properties_count because we need to
+ // pass temp as well to the runtime function.
+ int args_length = pattern->properties()->length();
+ rest_runtime_callargs =
+ new (zone()) ZoneList<Expression*>(args_length, zone());
+ rest_runtime_callargs->Add(factory()->NewVariableProxy(temp), zone());
+ }
+
block_->statements()->Add(parser_->BuildAssertIsCoercible(temp), zone());
for (ObjectLiteralProperty* property : *pattern->properties()) {
PatternContext context = SetInitializerContextIfNeeded(property->value());
+ Expression* value;
+
+ if (property->kind() == ObjectLiteralProperty::Kind::SPREAD) {
+ // var { y, [x++]: a, ...c } = temp
+ // becomes
+ // var y = temp.y;
+ // var temp1 = %ToName(x++);
+ // var a = temp[temp1];
+ // var c;
+ // c = %CopyDataPropertiesWithExcludedProperties(temp, "y", temp1);
+ value = factory()->NewCallRuntime(
+ Runtime::kCopyDataPropertiesWithExcludedProperties,
+ rest_runtime_callargs, kNoSourcePosition);
+ } else {
+ Expression* key = property->key();
- // Computed property names contain expressions which might require
- // scope rewriting.
- if (!property->key()->IsLiteral()) RewriteParameterScopes(property->key());
+ if (!key->IsLiteral()) {
+ // Computed property names contain expressions which might require
+ // scope rewriting.
+ RewriteParameterScopes(key);
+ }
+
+ if (pattern->has_rest_property()) {
+ Expression* excluded_property = key;
+
+ if (property->is_computed_name()) {
+ DCHECK(!key->IsPropertyName() || !key->IsNumberLiteral());
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(key, zone());
+ auto to_name_key = CreateTempVar(factory()->NewCallRuntime(
+ Runtime::kToName, args, kNoSourcePosition));
+ key = factory()->NewVariableProxy(to_name_key);
+ excluded_property = factory()->NewVariableProxy(to_name_key);
+ } else {
+ DCHECK(key->IsPropertyName() || key->IsNumberLiteral());
+ }
+
+ DCHECK(rest_runtime_callargs != nullptr);
+ rest_runtime_callargs->Add(excluded_property, zone());
+ }
+
+ value = factory()->NewProperty(factory()->NewVariableProxy(temp), key,
+ kNoSourcePosition);
+ }
- RecurseIntoSubpattern(
- property->value(),
- factory()->NewProperty(factory()->NewVariableProxy(temp),
- property->key(), kNoSourcePosition));
+ RecurseIntoSubpattern(property->value(), value);
set_context(context);
}
}
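The net effect matches the var { y, [x++]: a, ...c } comment above: each computed key goes through %ToName exactly once, and the same temporary is reused in the exclusion list passed to the runtime, so the rest object receives every remaining own enumerable property. Observable behavior, in plain JavaScript:

let calls = 0;
const key = () => (++calls, "a");
const { y, [key()]: a, ...rest } = { y: 1, a: 2, b: 3 };
// calls === 1: the computed key was evaluated exactly once; "y" and "a"
// were both excluded, leaving rest as { b: 3 }.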
@@ -369,8 +430,9 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
DCHECK(block_->ignore_completion_value());
auto temp = *temp_var = CreateTempVar(current_value_);
- auto iterator = CreateTempVar(factory()->NewGetIterator(
- factory()->NewVariableProxy(temp), kNoSourcePosition));
+ auto iterator = CreateTempVar(
+ factory()->NewGetIterator(factory()->NewVariableProxy(temp),
+ IteratorType::kNormal, kNoSourcePosition));
auto done =
CreateTempVar(factory()->NewBooleanLiteral(false, kNoSourcePosition));
auto result = CreateTempVar();
@@ -456,7 +518,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
factory()->NewExpressionStatement(
parser_->BuildIteratorNextResult(
factory()->NewVariableProxy(iterator), result,
- kNoSourcePosition),
+ IteratorType::kNormal, kNoSourcePosition),
kNoSourcePosition),
zone());
next_block->statements()->Add(inner_if, zone());
@@ -513,11 +575,8 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
Variable* array;
{
auto empty_exprs = new (zone()) ZoneList<Expression*>(0, zone());
- array = CreateTempVar(factory()->NewArrayLiteral(
- empty_exprs,
- // Reuse pattern's literal index - it is unused since there is no
- // actual literal allocated.
- node->literal_index(), kNoSourcePosition));
+ array = CreateTempVar(
+ factory()->NewArrayLiteral(empty_exprs, kNoSourcePosition));
}
// done = true;
@@ -530,7 +589,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
// result = IteratorNext(iterator);
Statement* get_next = factory()->NewExpressionStatement(
parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
- result, nopos),
+ result, IteratorType::kNormal, nopos),
nopos);
// %AppendElement(array, result.value);
@@ -599,7 +658,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
Token::NOT, factory()->NewVariableProxy(done), nopos);
parser_->FinalizeIteratorUse(scope(), completion, closing_condition, iterator,
- block_, target);
+ block_, target, IteratorType::kNormal);
block_ = target;
}
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index b7c76a4216..da90f2f3bd 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -15,15 +15,14 @@ namespace internal {
void ParserLogger::LogFunction(int start, int end, int num_parameters,
int function_length,
- bool has_duplicate_parameters, int literals,
- int properties, LanguageMode language_mode,
+ bool has_duplicate_parameters, int properties,
+ LanguageMode language_mode,
bool uses_super_property, bool calls_eval,
int num_inner_functions) {
function_store_.Add(start);
function_store_.Add(end);
function_store_.Add(num_parameters);
function_store_.Add(function_length);
- function_store_.Add(literals);
function_store_.Add(properties);
function_store_.Add(
FunctionEntry::EncodeFlags(language_mode, uses_super_property, calls_eval,
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index ca70f8a45f..eb3847505e 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -56,13 +56,12 @@ class PreParserLogger final {
num_inner_functions_(-1) {}
void LogFunction(int end, int num_parameters, int function_length,
- bool has_duplicate_parameters, int literals, int properties,
+ bool has_duplicate_parameters, int properties,
int num_inner_functions) {
end_ = end;
num_parameters_ = num_parameters;
function_length_ = function_length;
has_duplicate_parameters_ = has_duplicate_parameters;
- literals_ = literals;
properties_ = properties;
num_inner_functions_ = num_inner_functions;
}
@@ -77,9 +76,6 @@ class PreParserLogger final {
bool has_duplicate_parameters() const {
return has_duplicate_parameters_;
}
- int literals() const {
- return literals_;
- }
int properties() const {
return properties_;
}
@@ -91,7 +87,6 @@ class PreParserLogger final {
int num_parameters_;
int function_length_;
bool has_duplicate_parameters_;
- int literals_;
int properties_;
int num_inner_functions_;
};
@@ -101,7 +96,7 @@ class ParserLogger final {
ParserLogger();
void LogFunction(int start, int end, int num_parameters, int function_length,
- bool has_duplicate_parameters, int literals, int properties,
+ bool has_duplicate_parameters, int properties,
LanguageMode language_mode, bool uses_super_property,
bool calls_eval, int num_inner_functions);
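With the literals counter gone, each logged function entry now consists of the values pushed by the Add() calls above. A purely illustrative decoder struct (not a V8 type; whether num_inner_functions is folded into the flags word or stored in its own slot lies outside this hunk):

    // Illustrative only: mirrors the order of function_store_.Add() calls.
    struct LoggedFunctionEntry {
      int start;            // start position
      int end;              // end position
      int num_parameters;
      int function_length;
      int properties;       // expected property count; 'literals' was dropped
      int flags;            // FunctionEntry::EncodeFlags(language_mode, ...)
    };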
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
new file mode 100644
index 0000000000..d1d497c92f
--- /dev/null
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -0,0 +1,86 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/preparsed-scope-data.h"
+
+#include "src/ast/scopes.h"
+#include "src/ast/variables.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool PreParsedScopeData::HasVariablesWhichNeedAllocationData(Scope* scope) {
+ if (!scope->is_hidden()) {
+ for (Variable* var : *scope->locals()) {
+ if (var->mode() == VAR || var->mode() == LET || var->mode() == CONST) {
+ return true;
+ }
+ }
+ }
+ for (Scope* inner = scope->inner_scope(); inner != nullptr;
+ inner = inner->sibling()) {
+ if (HasVariablesWhichNeedAllocationData(inner)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+PreParsedScopeData::ScopeScope::ScopeScope(PreParsedScopeData* data,
+ ScopeType scope_type,
+ int start_position, int end_position)
+ : data_(data), previous_scope_(data->current_scope_) {
+ data->current_scope_ = this;
+ data->backing_store_.push_back(scope_type);
+ data->backing_store_.push_back(start_position);
+ data->backing_store_.push_back(end_position);
+ // Reserve space for the variable count and inner scope count (we don't
+ // know yet how many will be added).
+ index_in_data_ = data->backing_store_.size();
+ data->backing_store_.push_back(-1);
+ data->backing_store_.push_back(-1);
+}
+
+PreParsedScopeData::ScopeScope::~ScopeScope() {
+ data_->current_scope_ = previous_scope_;
+ if (got_data_) {
+ DCHECK_GT(variable_count_ + inner_scope_count_, 0);
+ if (previous_scope_ != nullptr) {
+ previous_scope_->got_data_ = true;
+ ++previous_scope_->inner_scope_count_;
+ }
+ data_->backing_store_[index_in_data_] = inner_scope_count_;
+ data_->backing_store_[index_in_data_ + 1] = variable_count_;
+ } else {
+ // No interesting data for this scope (or its children); remove from the
+ // data.
+ DCHECK_EQ(data_->backing_store_.size(), index_in_data_ + 2);
+ DCHECK_GE(index_in_data_, 3);
+ DCHECK_EQ(variable_count_, 0);
+ data_->backing_store_.erase(
+ data_->backing_store_.begin() + index_in_data_ - 3,
+ data_->backing_store_.end());
+ }
+}
+
+void PreParsedScopeData::ScopeScope::MaybeAddVariable(Variable* var) {
+ if (var->mode() == VAR || var->mode() == LET || var->mode() == CONST) {
+#ifdef DEBUG
+ // For tests (which check that the data is about the same variables).
+ const AstRawString* name = var->raw_name();
+ data_->backing_store_.push_back(name->length());
+ for (int i = 0; i < name->length(); ++i) {
+ data_->backing_store_.push_back(name->raw_data()[i]);
+ }
+#endif
+ data_->backing_store_.push_back(var->location());
+ data_->backing_store_.push_back(var->maybe_assigned());
+ ++variable_count_;
+ got_data_ = true;
+ }
+}
+
+} // namespace internal
+} // namespace v8
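ScopeScope is an RAII recorder: the constructor pushes a scope header plus two backpatch slots, and the destructor either fills in the inner-scope and variable counts or erases the whole record when nothing interesting was collected. A usage sketch against the API above (scope types and positions are made up, and var stands in for some Variable* being walked):

    PreParsedScopeData data;
    {
      PreParsedScopeData::ScopeScope outer(&data, FUNCTION_SCOPE, 0, 100);
      {
        PreParsedScopeData::ScopeScope inner(&data, BLOCK_SCOPE, 10, 90);
        inner.MaybeAddVariable(var);  // recorded only for VAR/LET/CONST modes
      }  // ~ScopeScope: backpatches inner's counts, or erases the empty record
    }    // ~ScopeScope: outer now knows it has one interesting inner scope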
diff --git a/deps/v8/src/parsing/preparsed-scope-data.h b/deps/v8/src/parsing/preparsed-scope-data.h
new file mode 100644
index 0000000000..72d1a717af
--- /dev/null
+++ b/deps/v8/src/parsing/preparsed-scope-data.h
@@ -0,0 +1,57 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PREPARSED_SCOPE_DATA_H_
+#define V8_PARSING_PREPARSED_SCOPE_DATA_H_
+
+#include <vector>
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class PreParsedScopeData {
+ public:
+ PreParsedScopeData() {}
+ ~PreParsedScopeData() {}
+
+ // Whether the scope has variables whose context allocation or
+ // maybe-assignedness we need to decide based on preparsed scope data.
+ static bool HasVariablesWhichNeedAllocationData(Scope* scope);
+
+ class ScopeScope {
+ public:
+ ScopeScope(PreParsedScopeData* data, ScopeType scope_type,
+ int start_position, int end_position);
+ ~ScopeScope();
+
+ void MaybeAddVariable(Variable* var);
+
+ private:
+ PreParsedScopeData* data_;
+ size_t index_in_data_;
+ ScopeScope* previous_scope_;
+
+ int inner_scope_count_ = 0;
+ int variable_count_ = 0;
+ bool got_data_ = false;
+ DISALLOW_COPY_AND_ASSIGN(ScopeScope);
+ };
+
+ private:
+ friend class ScopeTestHelper;
+
+ // TODO(marja): Make the backing store more efficient once we know exactly
+ // what data is needed.
+ std::vector<int> backing_store_;
+ ScopeScope* current_scope_ = nullptr;
+
+ DISALLOW_COPY_AND_ASSIGN(PreParsedScopeData);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_PREPARSED_SCOPE_DATA_H_
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 1dae5e9b66..3ed9a4d66a 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -103,10 +103,11 @@ PreParser::PreParseResult PreParser::PreParseFunction(
ResetFunctionLiteralId();
// The caller passes the function_scope which is not yet inserted into the
- // scope_state_. All scopes above the function_scope are ignored by the
+ // scope stack. All scopes above the function_scope are ignored by the
// PreParser.
- DCHECK_NULL(scope_state_);
- FunctionState function_state(&function_state_, &scope_state_, function_scope);
+ DCHECK_NULL(function_state_);
+ DCHECK_NULL(scope_);
+ FunctionState function_state(&function_state_, &scope_, function_scope);
// This indirection is needed so that we can use the CHECK_OK macros.
bool ok_holder = true;
bool* ok = &ok_holder;
@@ -131,21 +132,44 @@ PreParser::PreParseResult PreParser::PreParseFunction(
formals_end_position, CHECK_OK_VALUE(kPreParseSuccess));
has_duplicate_parameters =
!classifier()->is_valid_formal_parameter_list_without_duplicates();
-
- if (track_unresolved_variables_) {
- function_scope->DeclareVariableName(
- ast_value_factory()->arguments_string(), VAR);
- function_scope->DeclareVariableName(ast_value_factory()->this_string(),
- VAR);
- }
}
Expect(Token::LBRACE, CHECK_OK_VALUE(kPreParseSuccess));
- LazyParsingResult result = ParseStatementListAndLogFunction(
- &formals, has_duplicate_parameters, may_abort, ok);
+ DeclarationScope* inner_scope = function_scope;
+ LazyParsingResult result;
- if (is_sloppy(function_scope->language_mode())) {
- function_scope->HoistSloppyBlockFunctions(nullptr);
+ if (!formals.is_simple) {
+ inner_scope = NewVarblockScope();
+ inner_scope->set_start_position(scanner()->location().beg_pos);
+ }
+
+ {
+ BlockState block_state(&scope_, inner_scope);
+ result = ParseStatementListAndLogFunction(
+ &formals, has_duplicate_parameters, may_abort, ok);
+ }
+
+ if (!formals.is_simple) {
+ BuildParameterInitializationBlock(formals, ok);
+
+ if (is_sloppy(inner_scope->language_mode())) {
+ inner_scope->HoistSloppyBlockFunctions(nullptr);
+ }
+
+ SetLanguageMode(function_scope, inner_scope->language_mode());
+ inner_scope->set_end_position(scanner()->peek_location().end_pos);
+ inner_scope->FinalizeBlockScope();
+ } else {
+ if (is_sloppy(function_scope->language_mode())) {
+ function_scope->HoistSloppyBlockFunctions(nullptr);
+ }
+ }
+
+ if (!IsArrowFunction(kind) && track_unresolved_variables_) {
+ // Declare arguments after parsing the function since lexical 'arguments'
+ // masks the arguments object. Declare arguments before declaring the
+ // function var since the arguments object masks 'function arguments'.
+ function_scope->DeclareArguments(ast_value_factory());
}
use_counts_ = nullptr;
@@ -209,11 +233,9 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
runtime_call_stats_,
counters[track_unresolved_variables_][parsing_on_main_thread_]);
- // Parse function body.
- PreParserStatementList body;
DeclarationScope* function_scope = NewFunctionScope(kind);
function_scope->SetLanguageMode(language_mode);
- FunctionState function_state(&function_state_, &scope_state_, function_scope);
+ FunctionState function_state(&function_state_, &scope_, function_scope);
DuplicateFinder duplicate_finder;
ExpressionClassifier formals_classifier(this, &duplicate_finder);
GetNextFunctionLiteralId();
@@ -230,8 +252,13 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
formals_end_position, CHECK_OK);
Expect(Token::LBRACE, CHECK_OK);
- ParseStatementList(body, Token::RBRACE, CHECK_OK);
- Expect(Token::RBRACE, CHECK_OK);
+
+ // Parse function body.
+ PreParserStatementList body;
+ int pos = function_token_pos == kNoSourcePosition ? peek_position()
+ : function_token_pos;
+ ParseFunctionBody(body, function_name, pos, formals, kind, function_type,
+ CHECK_OK);
// Parsing the body may change the language mode in our scope.
language_mode = function_scope->language_mode();
@@ -252,7 +279,6 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
if (is_strict(language_mode)) {
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
}
- function_scope->set_end_position(end_position);
if (FLAG_trace_preparse) {
PrintF(" [%s]: %i-%i\n",
@@ -275,11 +301,11 @@ PreParser::LazyParsingResult PreParser::ParseStatementListAndLogFunction(
// Position right after terminal '}'.
DCHECK_EQ(Token::RBRACE, scanner()->peek());
int body_end = scanner()->peek_location().end_pos;
- DCHECK(this->scope()->is_function_scope());
- log_.LogFunction(
- body_end, formals->num_parameters(), formals->function_length,
- has_duplicate_parameters, function_state_->materialized_literal_count(),
- function_state_->expected_property_count(), GetLastFunctionLiteralId());
+ DCHECK_EQ(this->scope()->is_function_scope(), formals->is_simple);
+ log_.LogFunction(body_end, formals->num_parameters(),
+ formals->function_length, has_duplicate_parameters,
+ function_state_->expected_property_count(),
+ GetLastFunctionLiteralId());
return kLazyParsingComplete;
}
@@ -305,14 +331,20 @@ void PreParser::DeclareAndInitializeVariables(
ZoneList<const AstRawString*>* names, bool* ok) {
if (declaration->pattern.variables_ != nullptr) {
DCHECK(FLAG_lazy_inner_functions);
- Scope* scope = declaration_descriptor->hoist_scope;
- if (scope == nullptr) {
- scope = this->scope();
- }
+ DCHECK(track_unresolved_variables_);
for (auto variable : *(declaration->pattern.variables_)) {
declaration_descriptor->scope->RemoveUnresolved(variable);
- scope->DeclareVariableName(variable->raw_name(),
- declaration_descriptor->mode);
+ Variable* var = scope()->DeclareVariableName(
+ variable->raw_name(), declaration_descriptor->mode);
+ if (FLAG_preparser_scope_analysis) {
+ MarkLoopVariableAsAssigned(declaration_descriptor->scope, var);
+ // This is only necessary if there is an initializer, but we don't have
+ // that information here. Consequently, the preparser sometimes says
+ // maybe-assigned where the parser (correctly) says never-assigned.
+ }
+ if (names) {
+ names->Add(variable->raw_name(), zone());
+ }
}
}
}
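The PreParseFunction changes mirror the full parser's scoping for non-simple parameter lists: the body is parsed inside its own var-block scope so that body declarations don't land in the scope holding the parameters. Roughly, in JS terms (illustration only):

    // function f(a, [b, c] = d) {   // non-simple formals
    //   var e;                      // preparsed into inner_scope, which is
    // }                             // finalized as a block scope at '}'
    //
    // function g(a, b) {            // simple formals: old path, body parsed
    //   var e;                      // directly in function_scope, where
    // }                             // sloppy block functions are hoisted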
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 65c482c530..7498127012 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -9,6 +9,7 @@
#include "src/ast/scopes.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/preparse-data.h"
+#include "src/pending-compilation-error-handler.h"
namespace v8 {
namespace internal {
@@ -202,9 +203,10 @@ class PreParserExpression {
IsUseAsmField::encode(true));
}
- static PreParserExpression This() {
+ static PreParserExpression This(ZoneList<VariableProxy*>* variables) {
return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kThisExpression));
+ ExpressionTypeField::encode(kThisExpression),
+ variables);
}
static PreParserExpression ThisProperty() {
@@ -414,10 +416,11 @@ class PreParserList {
// These functions make list->Add(some_expression) work (and do nothing).
PreParserList() : length_(0), variables_(nullptr) {}
PreParserList* operator->() { return this; }
- void Add(T, Zone* zone);
+ void Add(const T& element, Zone* zone);
int length() const { return length_; }
static PreParserList Null() { return PreParserList(-1); }
bool IsNull() const { return length_ == -1; }
+ void Set(int index, const T& element) {}
private:
explicit PreParserList(int n) : length_(n), variables_(nullptr) {}
@@ -430,7 +433,7 @@ class PreParserList {
template <>
inline void PreParserList<PreParserExpression>::Add(
- PreParserExpression expression, Zone* zone) {
+ const PreParserExpression& expression, Zone* zone) {
if (expression.variables_ != nullptr) {
DCHECK(FLAG_lazy_inner_functions);
DCHECK(zone != nullptr);
@@ -445,7 +448,7 @@ inline void PreParserList<PreParserExpression>::Add(
}
template <typename T>
-void PreParserList<T>::Add(T, Zone* zone) {
+void PreParserList<T>::Add(const T& element, Zone* zone) {
++length_;
}
@@ -565,13 +568,11 @@ class PreParserFactory {
return PreParserExpression::Default();
}
PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
- int js_flags, int literal_index,
- int pos) {
+ int js_flags, int pos) {
return PreParserExpression::Default();
}
PreParserExpression NewArrayLiteral(PreParserExpressionList values,
- int first_spread_index, int literal_index,
- int pos) {
+ int first_spread_index, int pos) {
return PreParserExpression::ArrayLiteral(values.variables_);
}
PreParserExpression NewClassLiteralProperty(PreParserExpression key,
@@ -593,9 +594,8 @@ class PreParserFactory {
return PreParserExpression::Default(value.variables_);
}
PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
- int literal_index,
- int boilerplate_properties,
- int pos) {
+ int boilerplate_properties, int pos,
+ bool has_rest_property) {
return PreParserExpression::ObjectLiteral(properties.variables_);
}
PreParserExpression NewVariableProxy(void* variable) {
@@ -676,8 +676,7 @@ class PreParserFactory {
}
PreParserExpression NewFunctionLiteral(
PreParserIdentifier name, Scope* scope, PreParserStatementList body,
- int materialized_literal_count, int expected_property_count,
- int parameter_count, int function_length,
+ int expected_property_count, int parameter_count, int function_length,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
@@ -765,6 +764,17 @@ class PreParserFactory {
return PreParserStatement::Default();
}
+ PreParserStatement NewForOfStatement(ZoneList<const AstRawString*>* labels,
+ int pos) {
+ return PreParserStatement::Default();
+ }
+
+ PreParserExpression NewCallRuntime(Runtime::FunctionId id,
+ ZoneList<PreParserExpression>* arguments,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+
// Return the object itself as AstVisitor and implement the needed
// dummy method right in this class.
PreParserFactory* visitor() { return this; }
@@ -781,11 +791,20 @@ class PreParserFactory {
struct PreParserFormalParameters : FormalParametersBase {
struct Parameter : public ZoneObject {
- explicit Parameter(PreParserExpression pattern) : pattern(pattern) {}
+ Parameter(PreParserExpression pattern, bool is_destructuring, bool is_rest)
+ : pattern(pattern),
+ is_destructuring(is_destructuring),
+ is_rest(is_rest) {}
Parameter** next() { return &next_parameter; }
Parameter* const* next() const { return &next_parameter; }
+
+ bool is_nondestructuring_rest() const {
+ return is_rest && !is_destructuring;
+ }
PreParserExpression pattern;
Parameter* next_parameter = nullptr;
+ bool is_destructuring : 1;
+ bool is_rest : 1;
};
explicit PreParserFormalParameters(DeclarationScope* scope)
: FormalParametersBase(scope) {}
@@ -886,9 +905,8 @@ class PreParser : public ParserBase<PreParser> {
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
// during parsing.
- PreParseResult PreParseProgram(int* materialized_literals = 0,
- bool is_module = false) {
- DCHECK_NULL(scope_state_);
+ PreParseResult PreParseProgram(bool is_module = false) {
+ DCHECK_NULL(scope_);
DeclarationScope* scope = NewScriptScope();
#ifdef DEBUG
scope->set_is_being_lazily_parsed(true);
@@ -899,7 +917,7 @@ class PreParser : public ParserBase<PreParser> {
// the global scope.
if (is_module) scope = NewModuleScope(scope);
- FunctionState top_scope(&function_state_, &scope_state_, scope);
+ FunctionState top_scope(&function_state_, &scope_, scope);
bool ok = true;
int start_position = scanner()->peek_location().beg_pos;
parsing_module_ = is_module;
@@ -912,9 +930,6 @@ class PreParser : public ParserBase<PreParser> {
CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
&ok);
}
- if (materialized_literals) {
- *materialized_literals = function_state_->materialized_literal_count();
- }
return kPreParseSuccess;
}
@@ -943,21 +958,16 @@ class PreParser : public ParserBase<PreParser> {
// By making the 'exception handling' explicit, we are forced to check
// for failure at the call sites.
- V8_INLINE PreParserStatementList ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok);
-
  // Indicates that we won't switch from the preparser to another preparser;
  // we'll just stay where we are.
bool AllowsLazyParsingWithoutUnresolvedVariables() const { return false; }
bool parse_lazily() const { return false; }
- V8_INLINE LazyParsingResult SkipFunction(
- FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
- int* function_length, bool* has_duplicate_parameters,
- int* materialized_literal_count, int* expected_property_count,
- bool is_inner_function, bool may_abort, bool* ok) {
+ V8_INLINE LazyParsingResult
+ SkipFunction(FunctionKind kind, DeclarationScope* function_scope,
+ int* num_parameters, int* function_length,
+ bool* has_duplicate_parameters, int* expected_property_count,
+ bool is_inner_function, bool may_abort, bool* ok) {
UNREACHABLE();
return kLazyParsingComplete;
}
@@ -977,7 +987,8 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE void AddTemplateExpression(TemplateLiteralState* state,
PreParserExpression expression) {}
- V8_INLINE void AddTemplateSpan(TemplateLiteralState* state, bool tail) {}
+ V8_INLINE void AddTemplateSpan(TemplateLiteralState* state, bool should_cook,
+ bool tail) {}
V8_INLINE PreParserExpression CloseTemplateLiteral(
TemplateLiteralState* state, int start, PreParserExpression tag);
V8_INLINE void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
@@ -992,7 +1003,8 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserExpression SpreadCall(PreParserExpression function,
PreParserExpressionList args,
- int pos);
+ int pos,
+ Call::PossiblyEval possibly_eval);
V8_INLINE PreParserExpression SpreadCallNew(PreParserExpression function,
PreParserExpressionList args,
int pos);
@@ -1079,6 +1091,16 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Default();
}
+ V8_INLINE void ParseAndRewriteGeneratorFunctionBody(
+ int pos, FunctionKind kind, PreParserStatementList body, bool* ok) {
+ ParseStatementList(body, Token::RBRACE, ok);
+ }
+ V8_INLINE void CreateFunctionNameAssignment(
+ PreParserIdentifier function_name, int pos,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, PreParserStatementList result,
+ int index) {}
+
V8_INLINE PreParserExpression RewriteDoExpression(PreParserStatement body,
int pos, bool* ok) {
return PreParserExpression::Default();
@@ -1097,9 +1119,8 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserStatement DeclareFunction(
PreParserIdentifier variable_name, PreParserExpression function,
- VariableMode mode, int pos, bool is_generator, bool is_async,
- bool is_sloppy_block_function, ZoneList<const AstRawString*>* names,
- bool* ok) {
+ VariableMode mode, int pos, bool is_sloppy_block_function,
+ ZoneList<const AstRawString*>* names, bool* ok) {
DCHECK_NULL(names);
if (variable_name.string_ != nullptr) {
DCHECK(track_unresolved_variables_);
@@ -1125,7 +1146,7 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Default();
}
V8_INLINE void DeclareClassVariable(PreParserIdentifier name,
- Scope* block_scope, ClassInfo* class_info,
+ ClassInfo* class_info,
int class_token_pos, bool* ok) {}
V8_INLINE void DeclareClassProperty(PreParserIdentifier class_name,
PreParserExpression property,
@@ -1304,6 +1325,14 @@ class PreParser : public ParserBase<PreParser> {
return stmt;
}
+ V8_INLINE PreParserStatement InitializeForOfStatement(
+ PreParserStatement stmt, PreParserExpression each,
+ PreParserExpression iterable, PreParserStatement body, bool finalize,
+ IteratorType type, int next_result_pos = kNoSourcePosition) {
+ MarkExpressionAsAssigned(each);
+ return stmt;
+ }
+
V8_INLINE PreParserStatement RewriteForVarInLegacy(const ForInfo& for_info) {
return PreParserStatement::Null();
}
@@ -1313,14 +1342,30 @@ class PreParser : public ParserBase<PreParser> {
PreParserExpression* each_variable, bool* ok) {
if (track_unresolved_variables_) {
DCHECK(for_info->parsing_result.declarations.length() == 1);
+ bool is_for_var_of =
+ for_info->mode == ForEachStatement::ITERATE &&
+ for_info->parsing_result.descriptor.mode == VariableMode::VAR;
+ bool collect_names =
+ IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
+ is_for_var_of;
+
DeclareAndInitializeVariables(
PreParserStatement::Default(), &for_info->parsing_result.descriptor,
- &for_info->parsing_result.declarations[0], nullptr, ok);
+ &for_info->parsing_result.declarations[0],
+ collect_names ? &for_info->bound_names : nullptr, ok);
}
}
V8_INLINE PreParserStatement CreateForEachStatementTDZ(
PreParserStatement init_block, const ForInfo& for_info, bool* ok) {
+ if (track_unresolved_variables_) {
+ if (IsLexicalVariableMode(for_info.parsing_result.descriptor.mode)) {
+ for (auto name : for_info.bound_names) {
+ scope()->DeclareVariableName(name, LET);
+ }
+ return PreParserStatement::Default();
+ }
+ }
return init_block;
}
@@ -1329,9 +1374,43 @@ class PreParser : public ParserBase<PreParser> {
PreParserExpression cond, PreParserStatement next,
PreParserStatement body, Scope* inner_scope, const ForInfo& for_info,
bool* ok) {
+ // See Parser::DesugarLexicalBindingsInForStatement.
+ if (track_unresolved_variables_) {
+ for (auto name : for_info.bound_names) {
+ inner_scope->DeclareVariableName(
+ name, for_info.parsing_result.descriptor.mode);
+ }
+ }
return loop;
}
+ V8_INLINE PreParserStatement BuildParameterInitializationBlock(
+ const PreParserFormalParameters& parameters, bool* ok) {
+ if (track_unresolved_variables_) {
+ for (auto parameter : parameters.params) {
+ if (parameter->is_nondestructuring_rest()) break;
+ if (parameter->pattern.variables_ != nullptr) {
+ for (auto variable : *parameter->pattern.variables_) {
+ scope()->DeclareVariableName(variable->raw_name(), LET);
+ }
+ }
+ }
+ }
+ return PreParserStatement::Default();
+ }
+
+ V8_INLINE PreParserStatement
+ BuildRejectPromiseOnException(PreParserStatement init_block) {
+ return PreParserStatement::Default();
+ }
+
+ V8_INLINE void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope) {
+ scope->HoistSloppyBlockFunctions(nullptr);
+ }
+
+ V8_INLINE void InsertShadowingVarBindingInitializers(
+ PreParserStatement block) {}
+
V8_INLINE PreParserExpression
NewThrowReferenceError(MessageTemplate::Template message, int pos) {
return PreParserExpression::Default();
@@ -1448,7 +1527,19 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
- return PreParserExpression::This();
+ ZoneList<VariableProxy*>* variables = nullptr;
+ if (track_unresolved_variables_) {
+ AstNodeFactory factory(ast_value_factory());
+ // Setting the Zone is necessary because zone_ might be the temp Zone, and
+ // AstValueFactory doesn't know about it.
+ factory.set_zone(zone());
+ VariableProxy* proxy = scope()->NewUnresolved(
+ &factory, ast_value_factory()->this_string(), pos, THIS_VARIABLE);
+
+ variables = new (zone()) ZoneList<VariableProxy*>(1, zone());
+ variables->Add(proxy, zone());
+ }
+ return PreParserExpression::This(variables);
}
V8_INLINE PreParserExpression NewSuperPropertyReference(int pos) {
@@ -1525,8 +1616,8 @@ class PreParser : public ParserBase<PreParser> {
bool is_rest) {
if (track_unresolved_variables_) {
DCHECK(FLAG_lazy_inner_functions);
- parameters->params.Add(new (zone())
- PreParserFormalParameters::Parameter(pattern));
+ parameters->params.Add(new (zone()) PreParserFormalParameters::Parameter(
+ pattern, !IsIdentifier(pattern), is_rest));
}
parameters->UpdateArityAndFunctionLength(!initializer.IsEmpty(), is_rest);
}
@@ -1534,16 +1625,18 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void DeclareFormalParameters(
DeclarationScope* scope,
const ThreadedList<PreParserFormalParameters::Parameter>& parameters) {
- if (!classifier()->is_simple_parameter_list()) {
- scope->SetHasNonSimpleParameters();
- }
+ bool is_simple = classifier()->is_simple_parameter_list();
+ if (!is_simple) scope->SetHasNonSimpleParameters();
if (track_unresolved_variables_) {
DCHECK(FLAG_lazy_inner_functions);
for (auto parameter : parameters) {
- if (parameter->pattern.variables_ != nullptr) {
- for (auto variable : *parameter->pattern.variables_) {
- scope->DeclareVariableName(variable->raw_name(), VAR);
- }
+ bool use_name = is_simple || parameter->is_nondestructuring_rest();
+ if (use_name) {
+ DCHECK_NOT_NULL(parameter->pattern.variables_);
+ DCHECK_EQ(parameter->pattern.variables_->length(), 1);
+ auto variable = (*parameter->pattern.variables_)[0];
+ scope->DeclareParameterName(variable->raw_name(), parameter->is_rest,
+ ast_value_factory());
}
}
}
@@ -1565,8 +1658,6 @@ class PreParser : public ParserBase<PreParser> {
}
}
- V8_INLINE void ReindexLiterals(const PreParserFormalParameters& parameters) {}
-
V8_INLINE PreParserExpression NoTemplateTag() {
return PreParserExpression::NoTemplateTag();
}
@@ -1575,12 +1666,6 @@ class PreParser : public ParserBase<PreParser> {
return !tag.IsNoTemplateTag();
}
- V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {
- for (int i = 0; i < count; ++i) {
- function_state_->NextMaterializedLiteralIndex();
- }
- }
-
V8_INLINE PreParserExpression
ExpressionListToExpression(PreParserExpressionList args) {
return PreParserExpression::Default(args.variables_);
@@ -1607,6 +1692,8 @@ class PreParser : public ParserBase<PreParser> {
if (use_counts_ != nullptr) ++use_counts_[feature];
}
+ V8_INLINE bool ParsingDynamicFunctionDeclaration() const { return false; }
+
// Preparser's private field members.
int* use_counts_;
@@ -1616,9 +1703,9 @@ class PreParser : public ParserBase<PreParser> {
};
PreParserExpression PreParser::SpreadCall(PreParserExpression function,
- PreParserExpressionList args,
- int pos) {
- return factory()->NewCall(function, args, pos);
+ PreParserExpressionList args, int pos,
+ Call::PossiblyEval possibly_eval) {
+ return factory()->NewCall(function, args, pos, possibly_eval);
}
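Threading Call::PossiblyEval through SpreadCall preserves direct-eval detection for spread calls. Illustration (JS in comments; the enumerator names are the ones V8 uses for this flag, to the best of this edit's knowledge):

    // eval(...args)    // callee named 'eval': NewCall gets IS_POSSIBLY_EVAL,
    //                  // so the enclosing scope is still treated as
    //                  // containing a possible direct eval.
    // (0, eval)(...a)  // indirect eval: NOT_EVAL.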
PreParserExpression PreParser::SpreadCallNew(PreParserExpression function,
@@ -1627,38 +1714,9 @@ PreParserExpression PreParser::SpreadCallNew(PreParserExpression function,
return factory()->NewCallNew(function, args, pos);
}
-PreParserStatementList PreParser::ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok) {
- PreParserStatementList result;
-
- DeclarationScope* inner_scope = scope()->AsDeclarationScope();
- if (!parameters.is_simple) inner_scope = NewVarblockScope();
-
- {
- BlockState block_state(&scope_state_, inner_scope);
- ParseStatementList(result, Token::RBRACE, ok);
- if (!*ok) return PreParserStatementList();
- }
-
- Expect(Token::RBRACE, ok);
-
- if (is_sloppy(inner_scope->language_mode())) {
- inner_scope->HoistSloppyBlockFunctions(nullptr);
- }
- return result;
-}
-
PreParserExpression PreParser::CloseTemplateLiteral(TemplateLiteralState* state,
int start,
PreParserExpression tag) {
- if (IsTaggedTemplate(tag)) {
- // Emulate generation of array literals for tag callsite
- // 1st is array of cooked strings, second is array of raw strings
- function_state_->NextMaterializedLiteralIndex();
- function_state_->NextMaterializedLiteralIndex();
- }
return EmptyExpression();
}
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index b9c28248dc..291765cee4 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -6,13 +6,15 @@
#define V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
#include "include/v8.h" // for v8::ScriptCompiler
-#include "src/handles.h"
namespace v8 {
namespace internal {
+template <typename T>
+class Handle;
class Utf16CharacterStream;
class RuntimeCallStats;
+class String;
class ScannerStream {
public:
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index bfb5e03d68..c1580bbeae 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -19,6 +19,46 @@
namespace v8 {
namespace internal {
+// Scoped helper for saving & restoring scanner error state.
+// This is used for tagged template literals, in which normally forbidden
+// escape sequences are allowed.
+class ErrorState {
+ public:
+ ErrorState(MessageTemplate::Template* message_stack,
+ Scanner::Location* location_stack)
+ : message_stack_(message_stack),
+ old_message_(*message_stack),
+ location_stack_(location_stack),
+ old_location_(*location_stack) {
+ *message_stack_ = MessageTemplate::kNone;
+ *location_stack_ = Scanner::Location::invalid();
+ }
+
+ ~ErrorState() {
+ *message_stack_ = old_message_;
+ *location_stack_ = old_location_;
+ }
+
+ void MoveErrorTo(MessageTemplate::Template* message_dest,
+ Scanner::Location* location_dest) {
+ if (*message_stack_ == MessageTemplate::kNone) {
+ return;
+ }
+ if (*message_dest == MessageTemplate::kNone) {
+ *message_dest = *message_stack_;
+ *location_dest = *location_stack_;
+ }
+ *message_stack_ = MessageTemplate::kNone;
+ *location_stack_ = Scanner::Location::invalid();
+ }
+
+ private:
+ MessageTemplate::Template* const message_stack_;
+ MessageTemplate::Template const old_message_;
+ Scanner::Location* const location_stack_;
+ Scanner::Location const old_location_;
+};
+
Handle<String> Scanner::LiteralBuffer::Internalize(Isolate* isolate) const {
if (is_one_byte()) {
return isolate->factory()->InternalizeOneByteString(one_byte_literal());
@@ -948,16 +988,12 @@ bool Scanner::ScanEscape() {
break;
}
- // According to ECMA-262, section 7.8.4, characters not covered by the
- // above cases should be illegal, but they are commonly handled as
- // non-escaped characters by JS VMs.
+ // Other escaped characters are interpreted as their non-escaped version.
AddLiteralChar(c);
return true;
}
-// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
-// ECMA-262. Other JS VMs support them.
template <bool capture_raw>
uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
uc32 x = c - '0';
@@ -1039,6 +1075,12 @@ Token::Value Scanner::ScanTemplateSpan() {
// TEMPLATE_TAIL terminates a TemplateLiteral and does not need to be
// followed by an Expression.
+ // These scoped helpers save and restore the original error state, so that we
+ // can specially treat invalid escape sequences in templates (which are
+ // handled by the parser).
+ ErrorState scanner_error_state(&scanner_error_, &scanner_error_location_);
+ ErrorState octal_error_state(&octal_message_, &octal_pos_);
+
Token::Value result = Token::TEMPLATE_SPAN;
LiteralScope literal(this);
StartRawLiteral();
@@ -1069,8 +1111,16 @@ Token::Value Scanner::ScanTemplateSpan() {
AddRawLiteralChar('\n');
}
}
- } else if (!ScanEscape<capture_raw, in_template_literal>()) {
- return Token::ILLEGAL;
+ } else {
+ bool success = ScanEscape<capture_raw, in_template_literal>();
+ USE(success);
+ DCHECK_EQ(!success, has_error());
+ // For templates, invalid escape sequence checking is handled in the
+ // parser.
+ scanner_error_state.MoveErrorTo(&invalid_template_escape_message_,
+ &invalid_template_escape_location_);
+ octal_error_state.MoveErrorTo(&invalid_template_escape_message_,
+ &invalid_template_escape_location_);
}
} else if (c < 0) {
// Unterminated template literal
@@ -1095,6 +1145,7 @@ Token::Value Scanner::ScanTemplateSpan() {
literal.Complete();
next_.location.end_pos = source_pos();
next_.token = result;
+
return result;
}
@@ -1489,7 +1540,9 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
Token::Value token =
KeywordOrIdentifierToken(chars.start(), chars.length());
- if (token == Token::IDENTIFIER) literal.Complete();
+ if (token == Token::IDENTIFIER ||
+ token == Token::FUTURE_STRICT_RESERVED_WORD)
+ literal.Complete();
return token;
}
literal.Complete();
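ErrorState is a save/clear/restore guard: construction stashes the live error slots and clears them, MoveErrorTo re-files anything raised inside the guarded span, and destruction restores the saved state. Condensed flow of its use in ScanTemplateSpan, assembled from the hunks above:

    ErrorState guard(&scanner_error_, &scanner_error_location_);
    bool success = ScanEscape<capture_raw, in_template_literal>();
    if (!success) {
      // Instead of returning Token::ILLEGAL, park the error so the parser
      // can decide (tagged templates tolerate invalid escapes):
      guard.MoveErrorTo(&invalid_template_escape_message_,
                        &invalid_template_escape_location_);
    }
    // ~ErrorState restores whatever error state existed before the span.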
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 075b9ca6b2..9885b8ed0a 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -209,10 +209,27 @@ class Scanner {
// (the token last returned by Next()).
Location location() const { return current_.location; }
+ // This error is specifically an invalid hex or Unicode escape sequence.
bool has_error() const { return scanner_error_ != MessageTemplate::kNone; }
MessageTemplate::Template error() const { return scanner_error_; }
Location error_location() const { return scanner_error_location_; }
+ bool has_invalid_template_escape() const {
+ return invalid_template_escape_message_ != MessageTemplate::kNone;
+ }
+ MessageTemplate::Template invalid_template_escape_message() const {
+ return invalid_template_escape_message_;
+ }
+ Location invalid_template_escape_location() const {
+ return invalid_template_escape_location_;
+ }
+
+ void clear_invalid_template_escape() {
+ DCHECK(has_invalid_template_escape());
+ invalid_template_escape_message_ = MessageTemplate::kNone;
+ invalid_template_escape_location_ = Location::invalid();
+ }
+
// Similar functions for the upcoming token.
// One token look-ahead (past the token returned by Next()).
@@ -466,6 +483,7 @@ class Scanner {
next_next_.raw_literal_chars = NULL;
found_html_comment_ = false;
scanner_error_ = MessageTemplate::kNone;
+ invalid_template_escape_message_ = MessageTemplate::kNone;
}
void ReportScannerError(const Location& location,
@@ -756,6 +774,9 @@ class Scanner {
MessageTemplate::Template scanner_error_;
Location scanner_error_location_;
+
+ MessageTemplate::Template invalid_template_escape_message_;
+ Location invalid_template_escape_location_;
};
} // namespace internal
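On the parser side (not part of this hunk), the expected protocol for these accessors is: after scanning a template span, check has_invalid_template_escape(); a tagged template clears it and marks the span as uncooked, an untagged template reports it as a syntax error. A hypothetical consumer matching the accessors added above:

    if (scanner()->has_invalid_template_escape()) {
      if (is_tagged_template) {
        should_cook = false;  // cf. AddTemplateSpan(state, should_cook, tail)
        scanner()->clear_invalid_template_escape();
      } else {
        ReportMessageAt(scanner()->invalid_template_escape_location(),
                        scanner()->invalid_template_escape_message());
      }
    }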
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/pending-compilation-error-handler.cc
index 8f7660dd6b..e2db6db81c 100644
--- a/deps/v8/src/pending-compilation-error-handler.cc
+++ b/deps/v8/src/pending-compilation-error-handler.cc
@@ -9,6 +9,7 @@
#include "src/handles.h"
#include "src/isolate.h"
#include "src/messages.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -21,7 +22,6 @@ Handle<String> PendingCompilationErrorHandler::ArgumentString(
->NewStringFromUtf8(CStrVector(char_arg_))
.ToHandleChecked();
}
- if (!handle_arg_.is_null()) return handle_arg_;
return isolate->factory()->undefined_string();
}
diff --git a/deps/v8/src/pending-compilation-error-handler.h b/deps/v8/src/pending-compilation-error-handler.h
index 563bef93a2..42c679e75c 100644
--- a/deps/v8/src/pending-compilation-error-handler.h
+++ b/deps/v8/src/pending-compilation-error-handler.h
@@ -58,20 +58,6 @@ class PendingCompilationErrorHandler {
error_type_ = error_type;
}
- void ReportMessageAt(int start_position, int end_position,
- MessageTemplate::Template message, Handle<String> arg,
- ParseErrorType error_type = kSyntaxError) {
- if (has_pending_error_) return;
- has_pending_error_ = true;
- start_position_ = start_position;
- end_position_ = end_position;
- message_ = message;
- char_arg_ = nullptr;
- arg_ = nullptr;
- handle_arg_ = arg;
- error_type_ = error_type;
- }
-
bool has_pending_error() const { return has_pending_error_; }
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -86,7 +72,6 @@ class PendingCompilationErrorHandler {
MessageTemplate::Template message_;
const AstRawString* arg_;
const char* char_arg_;
- Handle<String> handle_arg_;
ParseErrorType error_type_;
DISALLOW_COPY_AND_ASSIGN(PendingCompilationErrorHandler);
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 12201daf21..216650c2f4 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -41,7 +41,7 @@
#include "src/assembler.h"
#include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -131,6 +131,17 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+Address Assembler::target_address_at(Address pc, Code* code) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
@@ -466,9 +477,9 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
#if V8_TARGET_ARCH_PPC64
-const int kLoadIntptrOpcode = LD;
+const uint32_t kLoadIntptrOpcode = LD;
#else
-const int kLoadIntptrOpcode = LWZ;
+const uint32_t kLoadIntptrOpcode = LWZ;
#endif
// Constant pool load sequence detection:
@@ -481,7 +492,7 @@ const int kLoadIntptrOpcode = LWZ;
bool Assembler::IsConstantPoolLoadStart(Address pc,
ConstantPoolEntry::Access* access) {
Instr instr = instr_at(pc);
- int opcode = instr & kOpcodeMask;
+ uint32_t opcode = instr & kOpcodeMask;
if (!GetRA(instr).is(kConstantPoolRegister)) return false;
bool overflowed = (opcode == ADDIS);
#ifdef DEBUG
@@ -501,7 +512,7 @@ bool Assembler::IsConstantPoolLoadStart(Address pc,
bool Assembler::IsConstantPoolLoadEnd(Address pc,
ConstantPoolEntry::Access* access) {
Instr instr = instr_at(pc);
- int opcode = instr & kOpcodeMask;
+ uint32_t opcode = instr & kOpcodeMask;
bool overflowed = false;
if (!(opcode == kLoadIntptrOpcode || opcode == LFD)) return false;
if (!GetRA(instr).is(kConstantPoolRegister)) {
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 32408f3079..645561dbdd 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -355,7 +355,7 @@ bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
bool Assembler::IsCmpRegister(Instr instr) {
return (((instr & kOpcodeMask) == EXT2) &&
- ((instr & kExt2OpcodeMask) == CMP));
+ ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
}
@@ -370,7 +370,7 @@ bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }
#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
return (((instr & kOpcodeMask) == EXT5) &&
- ((instr & kExt5OpcodeMask) == RLDICL));
+ ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
}
#endif
@@ -382,7 +382,7 @@ bool Assembler::IsCmpImmediate(Instr instr) {
bool Assembler::IsCrSet(Instr instr) {
return (((instr & kOpcodeMask) == EXT1) &&
- ((instr & kExt1OpcodeMask) == CREQV));
+ ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
}
@@ -425,7 +425,7 @@ enum {
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
// check which type of branch this is 16 or 26 bit offset
- int opcode = instr & kOpcodeMask;
+ uint32_t opcode = instr & kOpcodeMask;
int link;
switch (opcode) {
case BX:
@@ -455,7 +455,7 @@ int Assembler::target_at(int pos) {
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Instr instr = instr_at(pos);
- int opcode = instr & kOpcodeMask;
+ uint32_t opcode = instr & kOpcodeMask;
if (is_branch != nullptr) {
*is_branch = (opcode == BX || opcode == BCX);
@@ -535,7 +535,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
int Assembler::max_reach_from(int pos) {
Instr instr = instr_at(pos);
- int opcode = instr & kOpcodeMask;
+ uint32_t opcode = instr & kOpcodeMask;
// check which type of branch this is 16 or 26 bit offset
switch (opcode) {
@@ -646,7 +646,6 @@ void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
}
-
void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
OEBit o, RCBit r) {
emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
@@ -2251,13 +2250,13 @@ void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
- emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
+ emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
}
void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
- emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
+ emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
}
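On the int-to-uint32_t switch for opcode: PPC primary opcodes occupy the top six bits of the instruction, so masked values such as LD (58 << 26 = 0xE8000000) do not fit in a signed int; storing them in int relies on implementation-defined narrowing and yields negative values. A minimal standalone demonstration (the mask value here is assumed to be the usual top-6-bit mask):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kOpcodeMask = 0xFC000000u;  // assumed: top 6 bits
      const uint32_t instr = 0xE8000000u;        // LD-class encoding, 58 << 26
      int as_int = static_cast<int>(instr & kOpcodeMask);  // negative on
                                                           // two's-complement
      uint32_t as_u32 = instr & kOpcodeMask;               // 0xE8000000
      std::printf("%d vs 0x%08x\n", as_int, as_u32);
      return 0;
    }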
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index b385af0321..810b42f900 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -206,6 +206,7 @@ const Register kRootRegister = r29; // Roots array pointer.
const Register cp = r30; // JavaScript context pointer.
static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
// Double word FP register.
struct DoubleRegister {
@@ -469,17 +470,10 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(static Address target_address_at(Address pc, Code* code)) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- return target_address_at(pc, constant_pool);
- }
+ INLINE(static Address target_address_at(Address pc, Code* code));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(isolate, pc, constant_pool, target,
- icache_flush_mode);
- }
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index ec1d3a0c56..389cba2f17 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -205,9 +205,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
__ beq(slow);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
- __ beq(slow);
} else {
__ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
__ beq(&heap_number);
@@ -218,9 +215,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
__ beq(slow);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
- __ beq(slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -1106,8 +1100,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ li(kConstantPoolRegister, Operand::Zero());
__ push(kConstantPoolRegister);
}
- int marker = type();
- __ LoadSmiLiteral(r0, Smi::FromInt(marker));
+ StackFrame::Type marker = type();
+ __ mov(r0, Operand(StackFrame::TypeToMarker(marker)));
__ push(r0);
__ push(r0);
// Save copies of the top frame descriptor on the stack.
@@ -1126,11 +1120,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ cmpi(r9, Operand::Zero());
__ bne(&non_outermost_js);
__ StoreP(fp, MemOperand(r8));
- __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ bind(&non_outermost_js);
- __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+ __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
__ push(ip); // frame-type
@@ -1193,7 +1187,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(r8);
- __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
+ __ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ bne(&non_outermost_js_2);
__ mov(r9, Operand::Zero());
__ mov(r8, Operand(ExternalReference(js_entry_sp)));
@@ -1220,55 +1214,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ blr();
}
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver = LoadDescriptor::ReceiverRegister();
- // Ensure that the vector and slot registers won't be clobbered before
- // calling the miss handler.
- DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::VectorRegister(),
- LoadWithVectorDescriptor::SlotRegister()));
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r7,
- r8, &miss);
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
- // Return address is in lr.
- Label miss;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register index = LoadDescriptor::NameRegister();
- Register scratch = r8;
- Register result = r3;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
- result.is(LoadWithVectorDescriptor::SlotRegister()));
-
- // StringCharAtGenerator doesn't use the result register until it's passed
- // the different miss possibilities. If it did, we would have a conflict
- // when FLAG_vector_ics is true.
- StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- RECEIVER_IS_STRING);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -1373,7 +1318,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
- // (8) Sliced string. Replace subject with parent. Go to (1).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
@@ -1385,7 +1330,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (1) Sequential string? If yes, go to (4).
STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
- kShortExternalStringMask) == 0x93);
+ kShortExternalStringMask) == 0xa7);
__ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
@@ -1394,6 +1339,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
STATIC_ASSERT(kExternalStringTag < 0xffffu);
@@ -1422,9 +1368,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ble(&runtime);
__ SmiUntag(r4);
- STATIC_ASSERT(4 == kOneByteStringTag);
+ STATIC_ASSERT(8 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
- STATIC_ASSERT(kStringEncodingMask == 4);
+ STATIC_ASSERT(kStringEncodingMask == 8);
__ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC);
__ beq(&encoding_type_UC16, cr0);
__ LoadP(code,
@@ -1670,12 +1616,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
__ bne(&runtime, cr0);
- // (8) Sliced string. Replace subject with parent. Go to (4).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (4).
+ Label thin_string;
+ __ cmpi(r4, Operand(kThinStringTag));
+ __ beq(&thin_string);
// Load offset into r11 and replace subject string with parent.
__ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ SmiUntag(r11);
__ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ b(&check_underlying); // Go to (4).
+
+ __ bind(&thin_string);
+ __ LoadP(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+ __ b(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
@@ -1851,190 +1804,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
- Register slot, Register temp) {
- const int count_offset = FixedArray::kHeaderSize + kPointerSize;
- __ SmiToPtrArrayOffset(temp, slot);
- __ add(feedback_vector, feedback_vector, temp);
- __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset));
- __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp);
- __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp);
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
- // r3 - number of arguments
- // r4 - function
- // r6 - slot id
- // r5 - vector
- // r7 - allocation site (loaded from vector[slot])
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
- __ cmp(r4, r8);
- __ bne(miss);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, r5, r6, r0);
-
- __ mr(r5, r7);
- __ mr(r6, r4);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
- // r3 - number of arguments
- // r4 - function
- // r6 - slot id (Smi)
- // r5 - vector
- Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
- // The checks. First, does r4 match the recorded monomorphic target?
- __ SmiToPtrArrayOffset(r9, r6);
- __ add(r9, r5, r9);
- __ LoadP(r7, FieldMemOperand(r9, FixedArray::kHeaderSize));
-
- // We don't know that we have a weak cell. We might have a private symbol
- // or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
- // computed, meaning that it can't appear to be a pointer. If the low bit is
- // 0, then hash is computed, but the 0 bit prevents the field from appearing
- // to be a pointer.
- STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
- WeakCell::kValueOffset &&
- WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
- __ LoadP(r8, FieldMemOperand(r7, WeakCell::kValueOffset));
- __ cmp(r4, r8);
- __ bne(&extra_checks_or_miss);
-
- // The compare above could have been a SMI/SMI comparison. Guard against this
- // convincing us that we have a monomorphic JSFunction.
- __ JumpIfSmi(r4, &extra_checks_or_miss);
-
- __ bind(&call_function);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, r5, r6, r0);
-
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
- tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&extra_checks_or_miss);
- Label uninitialized, miss, not_allocation_site;
-
- __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
- __ beq(&call);
-
- // Verify that r7 contains an AllocationSite
- __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
- __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- __ bne(&not_allocation_site);
-
- // We have an allocation site.
- HandleArrayCase(masm, &miss);
-
- __ bind(&not_allocation_site);
-
- // The following cases attempt to handle MISS cases without going to the
- // runtime.
- if (FLAG_trace_ic) {
- __ b(&miss);
- }
-
- __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
- __ beq(&uninitialized);
-
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(r7);
- __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
- __ bne(&miss);
- __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
-
- __ bind(&call);
-
- // Increment the call count for megamorphic function calls.
- IncrementCallCount(masm, r5, r6, r0);
-
- __ bind(&call_count_incremented);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&uninitialized);
-
- // We are going monomorphic, provided we actually have a JSFunction.
- __ JumpIfSmi(r4, &miss);
-
- // Goto miss case if we do not have a function.
- __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
- __ bne(&miss);
-
- // Make sure the function is not the Array() function, which requires special
- // behavior on MISS.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
- __ cmp(r4, r7);
- __ beq(&miss);
-
- // Make sure the function belongs to the same native context.
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kContextOffset));
- __ LoadP(r7, ContextMemOperand(r7, Context::NATIVE_CONTEXT_INDEX));
- __ LoadP(ip, NativeContextMemOperand());
- __ cmp(r7, ip);
- __ bne(&miss);
-
- // Store the function. Use a stub since we need a frame for allocation.
- // r5 - vector
- // r6 - slot
- // r4 - function
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- CreateWeakCellStub create_stub(masm->isolate());
- __ SmiTag(r3);
- __ Push(r3, r5, r6, cp, r4);
- __ CallStub(&create_stub);
- __ Pop(r5, r6, cp, r4);
- __ Pop(r3);
- __ SmiUntag(r3);
- }
-
- __ b(&call_function);
-
- // We are here because tracing is on or we encountered a MISS case we can't
- // handle here.
- __ bind(&miss);
- GenerateMiss(masm);
-
- __ b(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the number of arguments as Smi.
- __ SmiTag(r3);
-
- // Push the receiver and the function and feedback info.
- __ Push(r3, r4, r5, r6);
-
- // Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss);
-
- // Move result to r4 and exit the internal frame.
- __ mr(r4, r3);
-
- // Restore number of arguments.
- __ Pop(r3);
- __ SmiUntag(r3);
-}
-
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
@@ -2121,46 +1890,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
- __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
- __ ori(r0, r0, Operand(kSmiTagMask));
- __ and_(r0, code_, r0, SetRC);
- __ bne(&slow_case_, cr0);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged one-byte char code.
- __ mr(r0, code_);
- __ SmiToPtrArrayOffset(code_, code_);
- __ add(result_, result_, code_);
- __ mr(code_, r0);
- __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ beq(&slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode);
- __ Move(result_, r3);
- call_helper.AfterCall(masm);
- __ b(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
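
The GenerateFast path removed above relies on a single AND to test both smi-ness and the one-byte range of the char code. A standalone sketch of that mask trick, assuming a 32-bit smi encoding (tag bit 0 clear, shift 1); all constants below are local stand-ins:

#include <cassert>
#include <cstdint>

constexpr uint32_t kSmiTagMask = 1;
constexpr uint32_t kMaxOneByteCharCode = 0xFF;

constexpr uint32_t SmiFromInt(int32_t value) {
  return static_cast<uint32_t>(value) << 1;
}

// One AND tests both conditions the fast path needs: the operand is a smi
// (low tag bit clear) AND its value fits in one byte (no bits above the
// smi-tagged 0xFF). A nonzero result sends us to the slow case.
bool NeedsSlowCase(uint32_t tagged_code) {
  uint32_t mask = SmiFromInt(~static_cast<int32_t>(kMaxOneByteCharCode)) |
                  kSmiTagMask;
  return (tagged_code & mask) != 0;
}

int main() {
  assert(!NeedsSlowCase(SmiFromInt('a')));     // in-range smi: fast path
  assert(NeedsSlowCase(SmiFromInt(0x100)));    // two-byte char code: slow
  assert(NeedsSlowCase(SmiFromInt('a') | 1));  // not a smi: slow
}
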
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3070,12 +2799,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadFeedbackVector(r5);
- CallICStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
@@ -3446,549 +3169,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r4);
-
- // Make r5 point to the JavaScript frame.
- __ mr(r5, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
- __ cmp(ip, r4);
- __ beq(&ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have rest parameters (only possible if we have an
- // arguments adaptor frame below the function frame).
- Label no_rest_parameters;
- __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&no_rest_parameters);
-
- // Check if the arguments adaptor frame contains more arguments than
- // specified by the function's internal formal parameter count.
- Label rest_parameters;
- __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_PPC64
- __ SmiTag(r6);
-#endif
- __ sub(r3, r3, r6, LeaveOE, SetRC);
- __ bgt(&rest_parameters, cr0);
-
- // Return an empty rest parameter array.
- __ bind(&no_rest_parameters);
- {
- // ----------- S t a t e -------------
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Allocate an empty rest parameter array.
- Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the rest parameter array in r3.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
- __ StoreP(r4, FieldMemOperand(r3, JSArray::kMapOffset), r0);
- __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r4, FieldMemOperand(r3, JSArray::kPropertiesOffset), r0);
- __ StoreP(r4, FieldMemOperand(r3, JSArray::kElementsOffset), r0);
- __ li(r4, Operand::Zero());
- __ StoreP(r4, FieldMemOperand(r3, JSArray::kLengthOffset), r0);
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(Smi::FromInt(JSArray::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- }
- __ b(&done_allocate);
- }
-
- __ bind(&rest_parameters);
- {
- // Compute the pointer to the first rest parameter (skipping the receiver).
- __ SmiToPtrArrayOffset(r9, r3);
- __ add(r5, r5, r9);
- __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- r3 : number of rest parameters (tagged)
- // -- r4 : function
- // -- r5 : pointer just past the first rest parameter
- // -- r9 : size of rest parameters
- // -- lr : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ mov(r10, Operand(JSArray::kSize + FixedArray::kHeaderSize));
- __ add(r10, r10, r9);
- __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the elements array in r6.
- __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
- __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
- __ addi(r7, r6,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- {
- Label loop;
- __ SmiUntag(r0, r3);
- __ mtctr(r0);
- __ bind(&loop);
- __ LoadPU(ip, MemOperand(r5, -kPointerSize));
- __ StorePU(ip, MemOperand(r7, kPointerSize));
- __ bdnz(&loop);
- __ addi(r7, r7, Operand(kPointerSize));
- }
-
- // Setup the rest parameter array in r7.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
- __ StoreP(r4, MemOperand(r7, JSArray::kMapOffset));
- __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r4, MemOperand(r7, JSArray::kPropertiesOffset));
- __ StoreP(r6, MemOperand(r7, JSArray::kElementsOffset));
- __ StoreP(r3, MemOperand(r7, JSArray::kLengthOffset));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ addi(r3, r7, Operand(kHeapObjectTag));
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0);
- __ bgt(&too_big_for_new_space);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r10);
- __ Push(r3, r5, r10);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ mr(r6, r3);
- __ Pop(r3, r5);
- }
- __ b(&done_allocate);
-
- // Fall back to %NewRestParameter.
- __ bind(&too_big_for_new_space);
- __ push(r4);
- __ TailCallRuntime(Runtime::kNewRestParameter);
- }
-}
-
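
The rest-parameter stub above only materializes rest arguments when an arguments adaptor frame sits below the function's frame, and their count is the surplus of actual over formal arguments. A hypothetical model of that computation in plain C++, with a container standing in for the stack walk:

#include <algorithm>
#include <iostream>
#include <vector>

// The stub copies the trailing surplus arguments into a fresh FixedArray
// backing store; here a vector plays both roles.
std::vector<int> CollectRestParameters(const std::vector<int>& actual_args,
                                       int formal_parameter_count) {
  int rest_count = std::max<int>(
      0, static_cast<int>(actual_args.size()) - formal_parameter_count);
  return std::vector<int>(actual_args.end() - rest_count, actual_args.end());
}

int main() {
  // function f(a, b, ...rest) called as f(1, 2, 3, 4, 5)
  std::vector<int> rest = CollectRestParameters({1, 2, 3, 4, 5}, 2);
  for (int v : rest) std::cout << v << ' ';  // prints: 3 4 5
  std::cout << '\n';
}
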
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r4);
-
- // Make r10 point to the JavaScript frame.
- __ mr(r10, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ LoadP(r10, MemOperand(r10, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ LoadP(ip, MemOperand(r10, StandardFrameConstants::kFunctionOffset));
- __ cmp(ip, r4);
- __ beq(&ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
-
- // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_PPC64
- __ SmiTag(r5);
-#endif
- __ SmiToPtrArrayOffset(r6, r5);
- __ add(r6, r10, r6);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // r4 : function
- // r5 : number of parameters (tagged)
- // r6 : parameters pointer
- // r10 : JavaScript frame pointer
- // Registers used over whole function:
- // r8 : arguments count (tagged)
- // r9 : mapped parameter count (tagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ LoadP(r7, MemOperand(r10, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ beq(&adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ mr(r8, r5);
- __ mr(r9, r5);
- __ b(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r6, r8);
- __ add(r6, r6, r7);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // r8 = argument count (tagged)
- // r9 = parameter count (tagged)
- // Compute the mapped parameter count = min(r5, r8) in r9.
- __ cmp(r5, r8);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(lt, r9, r5, r8);
- } else {
- Label skip;
- __ mr(r9, r5);
- __ blt(&skip);
- __ mr(r9, r8);
- __ bind(&skip);
- }
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- __ CmpSmiLiteral(r9, Smi::kZero, r0);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ SmiToPtrArrayOffset(r11, r9);
- __ addi(r11, r11, Operand(kParameterMapHeaderSize));
- __ isel(eq, r11, r0, r11);
- } else {
- Label skip2, skip3;
- __ bne(&skip2);
- __ li(r11, Operand::Zero());
- __ b(&skip3);
- __ bind(&skip2);
- __ SmiToPtrArrayOffset(r11, r9);
- __ addi(r11, r11, Operand(kParameterMapHeaderSize));
- __ bind(&skip3);
- }
-
- // 2. Backing store.
- __ SmiToPtrArrayOffset(r7, r8);
- __ add(r11, r11, r7);
- __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ addi(r11, r11, Operand(JSSloppyArgumentsObject::kSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(r11, r3, r11, r7, &runtime, NO_ALLOCATION_FLAGS);
-
- // r3 = address of new object(s) (tagged)
- // r5 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into r4.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ LoadP(r7, NativeContextMemOperand());
- __ cmpi(r9, Operand::Zero());
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadP(r11, MemOperand(r7, kNormalOffset));
- __ LoadP(r7, MemOperand(r7, kAliasedOffset));
- __ isel(eq, r7, r11, r7);
- } else {
- Label skip4, skip5;
- __ bne(&skip4);
- __ LoadP(r7, MemOperand(r7, kNormalOffset));
- __ b(&skip5);
- __ bind(&skip4);
- __ LoadP(r7, MemOperand(r7, kAliasedOffset));
- __ bind(&skip5);
- }
-
- // r3 = address of new object (tagged)
- // r5 = argument count (smi-tagged)
- // r7 = address of arguments map (tagged)
- // r9 = mapped parameter count (tagged)
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
- __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
- // Set up the callee in-object property.
- __ AssertNotSmi(r4);
- __ StoreP(r4, FieldMemOperand(r3, JSSloppyArgumentsObject::kCalleeOffset),
- r0);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(r8);
- __ StoreP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset),
- r0);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, r7 will point there, otherwise
- // it will point to the backing store.
- __ addi(r7, r3, Operand(JSSloppyArgumentsObject::kSize));
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
- // r3 = address of new object (tagged)
- // r5 = argument count (tagged)
- // r7 = address of parameter map or backing store (tagged)
- // r9 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ CmpSmiLiteral(r9, Smi::kZero, r0);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(eq, r4, r7, r4);
- __ beq(&skip_parameter_map);
- } else {
- Label skip6;
- __ bne(&skip6);
- // Move backing store address to r4, because it is
- // expected there when filling in the unmapped arguments.
- __ mr(r4, r7);
- __ b(&skip_parameter_map);
- __ bind(&skip6);
- }
-
- __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
- __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
- __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
- r0);
- __ SmiToPtrArrayOffset(r8, r9);
- __ add(r8, r8, r7);
- __ addi(r8, r8, Operand(kParameterMapHeaderSize));
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
- r0);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop;
- __ mr(r8, r9);
- __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
- __ sub(r11, r11, r9);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ SmiToPtrArrayOffset(r4, r8);
- __ add(r4, r4, r7);
- __ addi(r4, r4, Operand(kParameterMapHeaderSize));
-
- // r4 = address of backing store (tagged)
- // r7 = address of parameter map (tagged)
- // r8 = temporary scratch (among others, for address calculation)
- // r10 = temporary scratch (among others, for address calculation)
- // ip = the hole value
- __ SmiUntag(r8);
- __ mtctr(r8);
- __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
- __ add(r10, r4, r8);
- __ add(r8, r7, r8);
- __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-
- __ bind(&parameters_loop);
- __ StorePU(r11, MemOperand(r8, -kPointerSize));
- __ StorePU(ip, MemOperand(r10, -kPointerSize));
- __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
- __ bdnz(&parameters_loop);
-
- // Restore r8 = argument count (tagged).
- __ LoadP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // r3 = address of new object (tagged)
- // r4 = address of backing store (tagged)
- // r8 = argument count (tagged)
- // r9 = mapped parameter count (tagged)
- // r11 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
- __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
- __ sub(r11, r8, r9, LeaveOE, SetRC);
- __ Ret(eq, cr0);
-
- Label arguments_loop;
- __ SmiUntag(r11);
- __ mtctr(r11);
-
- __ SmiToPtrArrayOffset(r0, r9);
- __ sub(r6, r6, r0);
- __ add(r11, r4, r0);
- __ addi(r11, r11,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-
- __ bind(&arguments_loop);
- __ LoadPU(r7, MemOperand(r6, -kPointerSize));
- __ StorePU(r7, MemOperand(r11, kPointerSize));
- __ bdnz(&arguments_loop);
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // r8 = argument count (tagged)
- __ bind(&runtime);
- __ Push(r4, r6, r8);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
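
For reference, a rough model of the size arithmetic and the reverse-order parameter-map fill performed by the sloppy-arguments stub above; all sizes and the MIN_CONTEXT_SLOTS value are illustrative stand-ins, not V8's actual layout constants:

#include <algorithm>
#include <cstdio>

constexpr int kPointerSize = 8;
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
constexpr int kParameterMapHeaderSize =
    kFixedArrayHeaderSize + 2 * kPointerSize;  // + context + backing store
constexpr int kSloppyArgumentsObjectSize = 5 * kPointerSize;

int SloppyArgumentsAllocationSize(int argument_count, int mapped_count) {
  // 1. Parameter map, omitted entirely when nothing is mapped.
  int size = mapped_count == 0
                 ? 0
                 : kParameterMapHeaderSize + mapped_count * kPointerSize;
  // 2. Backing store holds every actual argument.
  size += kFixedArrayHeaderSize + argument_count * kPointerSize;
  // 3. The JSSloppyArgumentsObject itself.
  return size + kSloppyArgumentsObjectSize;
}

// Mapped parameters get context slot indices MIN_CONTEXT_SLOTS +
// parameter_count - 1 down to MIN_CONTEXT_SLOTS + parameter_count -
// mapped_count, filled right to left like the stub's decrementing stores.
void FillParameterMapIndices(int parameter_count, int mapped_count,
                             int* map_out /* mapped_count entries */) {
  constexpr int kMinContextSlots = 4;  // illustrative MIN_CONTEXT_SLOTS
  int index = kMinContextSlots + parameter_count - mapped_count;
  for (int i = mapped_count - 1; i >= 0; --i) map_out[i] = index++;
}

int main() {
  // f(a, b, c) called with five arguments: mapped = min(3, 5) = 3.
  int mapped = std::min(3, 5);
  std::printf("%d bytes in one allocation\n",
              SloppyArgumentsAllocationSize(5, mapped));
  int map[3];
  FillParameterMapIndices(3, mapped, map);
  std::printf("%d %d %d\n", map[0], map[1], map[2]);  // prints: 6 5 4
}
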
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r4);
-
- // Make r5 point to the JavaScript frame.
- __ mr(r5, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
- __ cmp(ip, r4);
- __ beq(&ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ beq(&arguments_adaptor);
- {
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- r3,
- FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_PPC64
- __ SmiTag(r3);
-#endif
- __ SmiToPtrArrayOffset(r9, r3);
- __ add(r5, r5, r9);
- }
- __ b(&arguments_done);
- __ bind(&arguments_adaptor);
- {
- __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r9, r3);
- __ add(r5, r6, r9);
- }
- __ bind(&arguments_done);
- __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- r3 : number of arguments (tagged)
- // -- r4 : function
- // -- r5 : pointer just past the first argument
- // -- r9 : size of arguments
- // -- lr : return address
- // -----------------------------------
-
- // Allocate space for the strict arguments object plus the backing store.
- Label allocate, done_allocate;
- __ mov(r10,
- Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ add(r10, r10, r9);
- __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the elements array in r6.
- __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
- __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
- __ addi(r7, r6,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- {
- Label loop, done_loop;
- __ SmiUntag(r0, r3, SetRC);
- __ beq(&done_loop, cr0);
- __ mtctr(r0);
- __ bind(&loop);
- __ LoadPU(ip, MemOperand(r5, -kPointerSize));
- __ StorePU(ip, MemOperand(r7, kPointerSize));
- __ bdnz(&loop);
- __ bind(&done_loop);
- __ addi(r7, r7, Operand(kPointerSize));
- }
-
- // Setup the strict arguments object in r7.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
- __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kMapOffset));
- __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kPropertiesOffset));
- __ StoreP(r6, MemOperand(r7, JSStrictArgumentsObject::kElementsOffset));
- __ StoreP(r3, MemOperand(r7, JSStrictArgumentsObject::kLengthOffset));
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
- __ addi(r3, r7, Operand(kHeapObjectTag));
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0);
- __ bgt(&too_big_for_new_space);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r10);
- __ Push(r3, r5, r10);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ mr(r6, r3);
- __ Pop(r3, r5);
- }
- __ b(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ bind(&too_big_for_new_space);
- __ push(r4);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
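
All three stubs removed here share the same three-tier allocation fallback: try the inline bump-pointer Allocate, fall back to Runtime::kAllocateInNewSpace while the object still fits in a regular page, and otherwise tail-call the full runtime path. A sketch of that decision, with an invented size limit:

#include <cstdio>

// Stand-in for V8's limit on objects allocated in a regular page; the
// real kMaxRegularHeapObjectSize is defined elsewhere in V8.
constexpr int kMaxRegularHeapObjectSize = 500 * 1024;

enum class AllocPath { kInlineBumpPointer, kRuntimeNewSpace, kFullRuntime };

AllocPath ChooseAllocationPath(int size_in_bytes, bool inline_space_left) {
  if (inline_space_left)
    return AllocPath::kInlineBumpPointer;  // __ Allocate(...) succeeded
  if (size_in_bytes <= kMaxRegularHeapObjectSize)
    return AllocPath::kRuntimeNewSpace;    // Runtime::kAllocateInNewSpace
  return AllocPath::kFullRuntime;          // e.g. Runtime::kNewStrictArguments
}

int main() {
  std::printf("%d %d %d\n",
              static_cast<int>(ChooseAllocationPath(64, true)),        // 0
              static_cast<int>(ChooseAllocationPath(64, false)),       // 1
              static_cast<int>(ChooseAllocationPath(1 << 20, false))); // 2
}
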
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index bb365b4e63..212e6db11d 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -77,6 +77,9 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
Register index, Register result,
Label* call_runtime) {
+ Label indirect_string_loaded;
+ __ bind(&indirect_string_loaded);
+
// Fetch the instance type of the receiver into result register.
__ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -86,20 +89,26 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
__ andi(r0, result, Operand(kIsIndirectStringMask));
__ beq(&check_sequential, cr0);
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ mov(ip, Operand(kSlicedNotConsMask));
- __ and_(r0, result, ip, SetRC);
- __ beq(&cons_string, cr0);
+ // Dispatch on the indirect string shape: slice or cons or thin.
+ Label cons_string, thin_string;
+ __ andi(ip, result, Operand(kStringRepresentationMask));
+ __ cmpi(ip, Operand(kConsStringTag));
+ __ beq(&cons_string);
+ __ cmpi(ip, Operand(kThinStringTag));
+ __ beq(&thin_string);
// Handle slices.
- Label indirect_string_loaded;
__ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ SmiUntag(ip, result);
__ add(index, index, ip);
__ b(&indirect_string_loaded);
+ // Handle thin strings.
+ __ bind(&thin_string);
+ __ LoadP(string, FieldMemOperand(string, ThinString::kActualOffset));
+ __ b(&indirect_string_loaded);
+
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
@@ -111,10 +120,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
__ bne(call_runtime);
// Get the first of the two strings and load its instance type.
__ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ b(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
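
The codegen-ppc.cc hunk above rewrites the indirect-string unwrapping as a loop: slices, cons strings, and the newly added thin strings all branch back to indirect_string_loaded, which now reloads the instance type at the top. A C++ sketch of the equivalent control flow over invented string shapes:

#include <iostream>
#include <string>

// Invented stand-ins for V8's string representations, illustration only.
struct Str {
  enum Kind { kSeq, kCons, kSliced, kThin } kind = kSeq;
  std::string seq;             // kSeq payload
  const Str* first = nullptr;  // kCons: left side (right side assumed empty)
  const Str* parent = nullptr; // kSliced / kThin target
  int offset = 0;              // kSliced offset into parent
};

// Mirrors the rewritten generator: loop back to the top after every
// unwrapping step instead of reloading the instance type inline.
char CharAt(const Str* s, int index) {
  for (;;) {  // label: indirect_string_loaded
    switch (s->kind) {
      case Str::kSeq:
        return s->seq[index];
      case Str::kSliced:  // adjust the index, continue with the parent
        index += s->offset;
        s = s->parent;
        break;
      case Str::kThin:    // new case: just follow the actual string
        s = s->parent;
        break;
      case Str::kCons:    // flat cons: descend into the first part
        s = s->first;
        break;
    }
  }
}

int main() {
  Str seq;   seq.seq = "hello world";
  Str slice; slice.kind = Str::kSliced; slice.parent = &seq; slice.offset = 6;
  Str thin;  thin.kind = Str::kThin;    thin.parent = &slice;
  std::cout << CharAt(&thin, 0) << '\n';  // 'w': thin -> slice(+6) -> seq
}
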
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index daa52257d6..d131438139 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -91,258 +91,2489 @@ inline Condition CommuteCondition(Condition cond) {
// representing instructions from usual 32 bit values.
// Instruction objects are pointers to 32bit values, and provide methods to
// access the various ISA fields.
-typedef int32_t Instr;
-
-// Opcodes as defined in section 4.2 table 34 (32bit PowerPC)
-enum Opcode {
- TWI = 3 << 26, // Trap Word Immediate
- MULLI = 7 << 26, // Multiply Low Immediate
- SUBFIC = 8 << 26, // Subtract from Immediate Carrying
- CMPLI = 10 << 26, // Compare Logical Immediate
- CMPI = 11 << 26, // Compare Immediate
- ADDIC = 12 << 26, // Add Immediate Carrying
- ADDICx = 13 << 26, // Add Immediate Carrying and Record
- ADDI = 14 << 26, // Add Immediate
- ADDIS = 15 << 26, // Add Immediate Shifted
- BCX = 16 << 26, // Branch Conditional
- SC = 17 << 26, // System Call
- BX = 18 << 26, // Branch
- EXT1 = 19 << 26, // Extended code set 1
- RLWIMIX = 20 << 26, // Rotate Left Word Immediate then Mask Insert
- RLWINMX = 21 << 26, // Rotate Left Word Immediate then AND with Mask
- RLWNMX = 23 << 26, // Rotate Left Word then AND with Mask
- ORI = 24 << 26, // OR Immediate
- ORIS = 25 << 26, // OR Immediate Shifted
- XORI = 26 << 26, // XOR Immediate
- XORIS = 27 << 26, // XOR Immediate Shifted
- ANDIx = 28 << 26, // AND Immediate
- ANDISx = 29 << 26, // AND Immediate Shifted
- EXT5 = 30 << 26, // Extended code set 5 - 64bit only
- EXT2 = 31 << 26, // Extended code set 2
- LWZ = 32 << 26, // Load Word and Zero
- LWZU = 33 << 26, // Load Word with Zero Update
- LBZ = 34 << 26, // Load Byte and Zero
- LBZU = 35 << 26, // Load Byte and Zero with Update
- STW = 36 << 26, // Store
- STWU = 37 << 26, // Store Word with Update
- STB = 38 << 26, // Store Byte
- STBU = 39 << 26, // Store Byte with Update
- LHZ = 40 << 26, // Load Half and Zero
- LHZU = 41 << 26, // Load Half and Zero with Update
- LHA = 42 << 26, // Load Half Algebraic
- LHAU = 43 << 26, // Load Half Algebraic with Update
- STH = 44 << 26, // Store Half
- STHU = 45 << 26, // Store Half with Update
- LMW = 46 << 26, // Load Multiple Word
- STMW = 47 << 26, // Store Multiple Word
- LFS = 48 << 26, // Load Floating-Point Single
- LFSU = 49 << 26, // Load Floating-Point Single with Update
- LFD = 50 << 26, // Load Floating-Point Double
- LFDU = 51 << 26, // Load Floating-Point Double with Update
- STFS = 52 << 26, // Store Floating-Point Single
- STFSU = 53 << 26, // Store Floating-Point Single with Update
- STFD = 54 << 26, // Store Floating-Point Double
- STFDU = 55 << 26, // Store Floating-Point Double with Update
- LD = 58 << 26, // Load Double Word
- EXT3 = 59 << 26, // Extended code set 3
- EXT6 = 60 << 26, // Extended code set 6
- STD = 62 << 26, // Store Double Word (optionally with Update)
- EXT4 = 63 << 26 // Extended code set 4
-};
-
-// Bits 10-1
-enum OpcodeExt1 {
- MCRF = 0 << 1, // Move Condition Register Field
- BCLRX = 16 << 1, // Branch Conditional Link Register
- CRNOR = 33 << 1, // Condition Register NOR
- RFI = 50 << 1, // Return from Interrupt
- CRANDC = 129 << 1, // Condition Register AND with Complement
- ISYNC = 150 << 1, // Instruction Synchronize
- CRXOR = 193 << 1, // Condition Register XOR
- CRNAND = 225 << 1, // Condition Register NAND
- CRAND = 257 << 1, // Condition Register AND
- CREQV = 289 << 1, // Condition Register Equivalent
- CRORC = 417 << 1, // Condition Register OR with Complement
- CROR = 449 << 1, // Condition Register OR
- BCCTRX = 528 << 1 // Branch Conditional to Count Register
-};
-
-// Bits 9-1 or 10-1
-enum OpcodeExt2 {
- CMP = 0 << 1,
- TW = 4 << 1,
- SUBFCX = 8 << 1,
- ADDCX = 10 << 1,
- MULHWUX = 11 << 1,
- ISEL = 15 << 1,
- MFCR = 19 << 1,
- LWARX = 20 << 1,
- LDX = 21 << 1,
- LWZX = 23 << 1, // load word zero w/ x-form
- SLWX = 24 << 1,
- CNTLZWX = 26 << 1,
- SLDX = 27 << 1,
- ANDX = 28 << 1,
- CMPL = 32 << 1,
- SUBFX = 40 << 1,
- MFVSRD = 51 << 1, // Move From VSR Doubleword
- LDUX = 53 << 1,
- DCBST = 54 << 1,
- LWZUX = 55 << 1, // load word zero w/ update x-form
- CNTLZDX = 58 << 1,
- ANDCX = 60 << 1,
- MULHWX = 75 << 1,
- DCBF = 86 << 1,
- LBZX = 87 << 1, // load byte zero w/ x-form
- NEGX = 104 << 1,
- MFVSRWZ = 115 << 1, // Move From VSR Word And Zero
- LBZUX = 119 << 1, // load byte zero w/ update x-form
- NORX = 124 << 1,
- SUBFEX = 136 << 1,
- ADDEX = 138 << 1,
- STDX = 149 << 1,
- STWX = 151 << 1, // store word w/ x-form
- MTVSRD = 179 << 1, // Move To VSR Doubleword
- STDUX = 181 << 1,
- STWUX = 183 << 1, // store word w/ update x-form
- /*
- MTCRF
- MTMSR
- STWCXx
- SUBFZEX
- */
- ADDZEX = 202 << 1, // Add to Zero Extended
- /*
- MTSR
- */
-
- MTVSRWA = 211 << 1, // Move To VSR Word Algebraic
- STBX = 215 << 1, // store byte w/ x-form
- MULLD = 233 << 1, // Multiply Low Double Word
- MULLW = 235 << 1, // Multiply Low Word
- MTVSRWZ = 243 << 1, // Move To VSR Word And Zero
- STBUX = 247 << 1, // store byte w/ update x-form
- MODUD = 265 << 1, // Modulo Unsigned Dword
- ADDX = 266 << 1, // Add
- MODUW = 267 << 1, // Modulo Unsigned Word
- LHZX = 279 << 1, // load half-word zero w/ x-form
- LHZUX = 311 << 1, // load half-word zero w/ update x-form
- LWAX = 341 << 1, // load word algebraic w/ x-form
- LHAX = 343 << 1, // load half-word algebraic w/ x-form
- LHAUX = 375 << 1, // load half-word algebraic w/ update x-form
- XORX = 316 << 1, // Exclusive OR
- MFSPR = 339 << 1, // Move from Special-Purpose-Register
- POPCNTW = 378 << 1, // Population Count Words
- STHX = 407 << 1, // store half-word w/ x-form
- ORC = 412 << 1, // Or with Complement
- STHUX = 439 << 1, // store half-word w/ update x-form
- ORX = 444 << 1, // Or
- DIVDU = 457 << 1, // Divide Double Word Unsigned
- DIVWU = 459 << 1, // Divide Word Unsigned
- MTSPR = 467 << 1, // Move to Special-Purpose-Register
- DIVD = 489 << 1, // Divide Double Word
- DIVW = 491 << 1, // Divide Word
- POPCNTD = 506 << 1, // Population Count Doubleword
-
- // Below represent bits 10-1 (any value >= 512)
- LDBRX = 532 << 1, // load double word byte reversed w/ x-form
- LWBRX = 534 << 1, // load word byte reversed w/ x-form
- LFSX = 535 << 1, // load float-single w/ x-form
- SRWX = 536 << 1, // Shift Right Word
- SRDX = 539 << 1, // Shift Right Double Word
- LFSUX = 567 << 1, // load float-single w/ update x-form
- SYNC = 598 << 1, // Synchronize
- LFDX = 599 << 1, // load float-double w/ x-form
- LFDUX = 631 << 1, // load float-double w/ update X-form
- STFSX = 663 << 1, // store float-single w/ x-form
- STFSUX = 695 << 1, // store float-single w/ update x-form
- STFDX = 727 << 1, // store float-double w/ x-form
- STFDUX = 759 << 1, // store float-double w/ update x-form
- MODSD = 777 << 1, // Modulo Signed Dword
- MODSW = 779 << 1, // Modulo Signed Word
- LHBRX = 790 << 1, // load half word byte reversed w/ x-form
- SRAW = 792 << 1, // Shift Right Algebraic Word
- SRAD = 794 << 1, // Shift Right Algebraic Double Word
- SRAWIX = 824 << 1, // Shift Right Algebraic Word Immediate
- SRADIX = 413 << 2, // Shift Right Algebraic Double Word Immediate
- EXTSH = 922 << 1, // Extend Sign Halfword
- EXTSB = 954 << 1, // Extend Sign Byte
- ICBI = 982 << 1, // Instruction Cache Block Invalidate
- EXTSW = 986 << 1 // Extend Sign Word
-};
-
- // Some use Bits 10-1 and others only 5-1 for the opcode
-enum OpcodeExt4 {
- // Bits 5-1
- FDIV = 18 << 1, // Floating Divide
- FSUB = 20 << 1, // Floating Subtract
- FADD = 21 << 1, // Floating Add
- FSQRT = 22 << 1, // Floating Square Root
- FSEL = 23 << 1, // Floating Select
- FMUL = 25 << 1, // Floating Multiply
- FMSUB = 28 << 1, // Floating Multiply-Subtract
- FMADD = 29 << 1, // Floating Multiply-Add
-
- // Bits 10-1
- FCMPU = 0 << 1, // Floating Compare Unordered
- FRSP = 12 << 1, // Floating-Point Rounding
- FCTIW = 14 << 1, // Floating Convert to Integer Word X-form
- FCTIWZ = 15 << 1, // Floating Convert to Integer Word with Round to Zero
- MTFSB1 = 38 << 1, // Move to FPSCR Bit 1
- FNEG = 40 << 1, // Floating Negate
- MCRFS = 64 << 1, // Move to Condition Register from FPSCR
- MTFSB0 = 70 << 1, // Move to FPSCR Bit 0
- FMR = 72 << 1, // Floating Move Register
- MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
- FABS = 264 << 1, // Floating Absolute Value
- FRIN = 392 << 1, // Floating Round to Integer Nearest
- FRIZ = 424 << 1, // Floating Round to Integer Toward Zero
- FRIP = 456 << 1, // Floating Round to Integer Plus
- FRIM = 488 << 1, // Floating Round to Integer Minus
- MFFS = 583 << 1, // move from FPSCR x-form
- MTFSF = 711 << 1, // move to FPSCR fields XFL-form
- FCTID = 814 << 1, // Floating convert to integer doubleword
- FCTIDZ = 815 << 1, // ^^^ with round toward zero
- FCFID = 846 << 1, // Floating convert from integer doubleword
- FCTIDU = 942 << 1, // Floating convert to integer doubleword unsigned
- FCTIDUZ = 943 << 1, // ^^^ with round toward zero
- FCFIDU = 974 << 1 // Floating convert from integer doubleword unsigned
-};
-
-enum OpcodeExt5 {
- // Bits 4-2
- RLDICL = 0 << 1, // Rotate Left Double Word Immediate then Clear Left
- RLDICR = 2 << 1, // Rotate Left Double Word Immediate then Clear Right
- RLDIC = 4 << 1, // Rotate Left Double Word Immediate then Clear
- RLDIMI = 6 << 1, // Rotate Left Double Word Immediate then Mask Insert
- // Bits 4-1
- RLDCL = 8 << 1, // Rotate Left Double Word then Clear Left
- RLDCR = 9 << 1 // Rotate Left Double Word then Clear Right
-};
-
-// Bits 10-3
-#define XX3_OPCODE_LIST(V) \
- V(xsaddsp, XSADDSP, 0 << 3) /* VSX Scalar Add SP */ \
- V(xssubsp, XSSUBSP, 8 << 3) /* VSX Scalar Subtract SP */ \
- V(xsmulsp, XSMULSP, 16 << 3) /* VSX Scalar Multiply SP */ \
- V(xsdivsp, XSDIVSP, 24 << 3) /* VSX Scalar Divide SP */ \
- V(xsadddp, XSADDDP, 32 << 3) /* VSX Scalar Add DP */ \
- V(xssubdp, XSSUBDP, 40 << 3) /* VSX Scalar Subtract DP */ \
- V(xsmuldp, XSMULDP, 48 << 3) /* VSX Scalar Multiply DP */ \
- V(xsdivdp, XSDIVDP, 56 << 3) /* VSX Scalar Divide DP */ \
- V(xsmaxdp, XSMAXDP, 160 << 3) /* VSX Scalar Maximum DP */ \
- V(xsmindp, XSMINDP, 168 << 3) /* VSX Scalar Minimum DP */
-
-// Bits 10-2
-#define XX2_OPCODE_LIST(V) \
- V(XSCVDPSP, XSCVDPSP, 265 << 2) /* VSX Scalar Convert DP to SP */ \
- V(XSCVSPDP, XSCVSPDP, 329 << 2) /* VSX Scalar Convert SP to DP */
-
-enum OpcodeExt6 {
-#define DECLARE_OPCODES(name, opcode_name, opcode_value) \
+typedef uint32_t Instr;
+
+#define PPC_XX3_OPCODE_LIST(V) \
+ /* VSX Scalar Add Double-Precision */ \
+ V(xsadddp, XSADDDP, 0xF0000100) \
+ /* VSX Scalar Add Single-Precision */ \
+ V(xsaddsp, XSADDSP, 0xF0000000) \
+ /* VSX Scalar Compare Ordered Double-Precision */ \
+ V(xscmpodp, XSCMPODP, 0xF0000158) \
+ /* VSX Scalar Compare Unordered Double-Precision */ \
+ V(xscmpudp, XSCMPUDP, 0xF0000118) \
+ /* VSX Scalar Copy Sign Double-Precision */ \
+ V(xscpsgndp, XSCPSGNDP, 0xF0000580) \
+ /* VSX Scalar Divide Double-Precision */ \
+ V(xsdivdp, XSDIVDP, 0xF00001C0) \
+ /* VSX Scalar Divide Single-Precision */ \
+ V(xsdivsp, XSDIVSP, 0xF00000C0) \
+ /* VSX Scalar Multiply-Add Type-A Double-Precision */ \
+ V(xsmaddadp, XSMADDADP, 0xF0000108) \
+ /* VSX Scalar Multiply-Add Type-A Single-Precision */ \
+ V(xsmaddasp, XSMADDASP, 0xF0000008) \
+ /* VSX Scalar Multiply-Add Type-M Double-Precision */ \
+ V(xsmaddmdp, XSMADDMDP, 0xF0000148) \
+ /* VSX Scalar Multiply-Add Type-M Single-Precision */ \
+ V(xsmaddmsp, XSMADDMSP, 0xF0000048) \
+ /* VSX Scalar Maximum Double-Precision */ \
+ V(xsmaxdp, XSMAXDP, 0xF0000500) \
+ /* VSX Scalar Minimum Double-Precision */ \
+ V(xsmindp, XSMINDP, 0xF0000540) \
+ /* VSX Scalar Multiply-Subtract Type-A Double-Precision */ \
+ V(xsmsubadp, XSMSUBADP, 0xF0000188) \
+ /* VSX Scalar Multiply-Subtract Type-A Single-Precision */ \
+ V(xsmsubasp, XSMSUBASP, 0xF0000088) \
+ /* VSX Scalar Multiply-Subtract Type-M Double-Precision */ \
+ V(xsmsubmdp, XSMSUBMDP, 0xF00001C8) \
+ /* VSX Scalar Multiply-Subtract Type-M Single-Precision */ \
+ V(xsmsubmsp, XSMSUBMSP, 0xF00000C8) \
+ /* VSX Scalar Multiply Double-Precision */ \
+ V(xsmuldp, XSMULDP, 0xF0000180) \
+ /* VSX Scalar Multiply Single-Precision */ \
+ V(xsmulsp, XSMULSP, 0xF0000080) \
+ /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */ \
+ V(xsnmaddadp, XSNMADDADP, 0xF0000508) \
+ /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */ \
+ V(xsnmaddasp, XSNMADDASP, 0xF0000408) \
+ /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */ \
+ V(xsnmaddmdp, XSNMADDMDP, 0xF0000548) \
+ /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */ \
+ V(xsnmaddmsp, XSNMADDMSP, 0xF0000448) \
+ /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */ \
+ V(xsnmsubadp, XSNMSUBADP, 0xF0000588) \
+ /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */ \
+ V(xsnmsubasp, XSNMSUBASP, 0xF0000488) \
+ /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */ \
+ V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8) \
+ /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */ \
+ V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8) \
+ /* VSX Scalar Reciprocal Estimate Double-Precision */ \
+ V(xsredp, XSREDP, 0xF0000168) \
+ /* VSX Scalar Reciprocal Estimate Single-Precision */ \
+ V(xsresp, XSRESP, 0xF0000068) \
+ /* VSX Scalar Subtract Double-Precision */ \
+ V(xssubdp, XSSUBDP, 0xF0000140) \
+ /* VSX Scalar Subtract Single-Precision */ \
+ V(xssubsp, XSSUBSP, 0xF0000040) \
+ /* VSX Scalar Test for software Divide Double-Precision */ \
+ V(xstdivdp, XSTDIVDP, 0xF00001E8) \
+ /* VSX Vector Add Double-Precision */ \
+ V(xvadddp, XVADDDP, 0xF0000300) \
+ /* VSX Vector Add Single-Precision */ \
+ V(xvaddsp, XVADDSP, 0xF0000200) \
+ /* VSX Vector Compare Equal To Double-Precision */ \
+ V(xvcmpeqdp, XVCMPEQDP, 0xF0000318) \
+ /* VSX Vector Compare Equal To Double-Precision & record CR6 */ \
+ V(xvcmpeqdpx, XVCMPEQDPx, 0xF0000718) \
+ /* VSX Vector Compare Equal To Single-Precision */ \
+ V(xvcmpeqsp, XVCMPEQSP, 0xF0000218) \
+ /* VSX Vector Compare Equal To Single-Precision & record CR6 */ \
+ V(xvcmpeqspx, XVCMPEQSPx, 0xF0000618) \
+ /* VSX Vector Compare Greater Than or Equal To Double-Precision */ \
+ V(xvcmpgedp, XVCMPGEDP, 0xF0000398) \
+ /* VSX Vector Compare Greater Than or Equal To Double-Precision & record */ \
+ /* CR6 */ \
+ V(xvcmpgedpx, XVCMPGEDPx, 0xF0000798) \
+ /* VSX Vector Compare Greater Than or Equal To Single-Precision */ \
+ V(xvcmpgesp, XVCMPGESP, 0xF0000298) \
+ /* VSX Vector Compare Greater Than or Equal To Single-Precision & record */ \
+ /* CR6 */ \
+ V(xvcmpgespx, XVCMPGESPx, 0xF0000698) \
+ /* VSX Vector Compare Greater Than Double-Precision */ \
+ V(xvcmpgtdp, XVCMPGTDP, 0xF0000358) \
+ /* VSX Vector Compare Greater Than Double-Precision & record CR6 */ \
+ V(xvcmpgtdpx, XVCMPGTDPx, 0xF0000758) \
+ /* VSX Vector Compare Greater Than Single-Precision */ \
+ V(xvcmpgtsp, XVCMPGTSP, 0xF0000258) \
+ /* VSX Vector Compare Greater Than Single-Precision & record CR6 */ \
+ V(xvcmpgtspx, XVCMPGTSPx, 0xF0000658) \
+ /* VSX Vector Copy Sign Double-Precision */ \
+ V(xvcpsgndp, XVCPSGNDP, 0xF0000780) \
+ /* VSX Vector Copy Sign Single-Precision */ \
+ V(xvcpsgnsp, XVCPSGNSP, 0xF0000680) \
+ /* VSX Vector Divide Double-Precision */ \
+ V(xvdivdp, XVDIVDP, 0xF00003C0) \
+ /* VSX Vector Divide Single-Precision */ \
+ V(xvdivsp, XVDIVSP, 0xF00002C0) \
+ /* VSX Vector Multiply-Add Type-A Double-Precision */ \
+ V(xvmaddadp, XVMADDADP, 0xF0000308) \
+ /* VSX Vector Multiply-Add Type-A Single-Precision */ \
+ V(xvmaddasp, XVMADDASP, 0xF0000208) \
+ /* VSX Vector Multiply-Add Type-M Double-Precision */ \
+ V(xvmaddmdp, XVMADDMDP, 0xF0000348) \
+ /* VSX Vector Multiply-Add Type-M Single-Precision */ \
+ V(xvmaddmsp, XVMADDMSP, 0xF0000248) \
+ /* VSX Vector Maximum Double-Precision */ \
+ V(xvmaxdp, XVMAXDP, 0xF0000700) \
+ /* VSX Vector Maximum Single-Precision */ \
+ V(xvmaxsp, XVMAXSP, 0xF0000600) \
+ /* VSX Vector Minimum Double-Precision */ \
+ V(xvmindp, XVMINDP, 0xF0000740) \
+ /* VSX Vector Minimum Single-Precision */ \
+ V(xvminsp, XVMINSP, 0xF0000640) \
+ /* VSX Vector Multiply-Subtract Type-A Double-Precision */ \
+ V(xvmsubadp, XVMSUBADP, 0xF0000388) \
+ /* VSX Vector Multiply-Subtract Type-A Single-Precision */ \
+ V(xvmsubasp, XVMSUBASP, 0xF0000288) \
+ /* VSX Vector Multiply-Subtract Type-M Double-Precision */ \
+ V(xvmsubmdp, XVMSUBMDP, 0xF00003C8) \
+ /* VSX Vector Multiply-Subtract Type-M Single-Precision */ \
+ V(xvmsubmsp, XVMSUBMSP, 0xF00002C8) \
+ /* VSX Vector Multiply Double-Precision */ \
+ V(xvmuldp, XVMULDP, 0xF0000380) \
+ /* VSX Vector Multiply Single-Precision */ \
+ V(xvmulsp, XVMULSP, 0xF0000280) \
+ /* VSX Vector Negative Multiply-Add Type-A Double-Precision */ \
+ V(xvnmaddadp, XVNMADDADP, 0xF0000708) \
+ /* VSX Vector Negative Multiply-Add Type-A Single-Precision */ \
+ V(xvnmaddasp, XVNMADDASP, 0xF0000608) \
+ /* VSX Vector Negative Multiply-Add Type-M Double-Precision */ \
+ V(xvnmaddmdp, XVNMADDMDP, 0xF0000748) \
+ /* VSX Vector Negative Multiply-Add Type-M Single-Precision */ \
+ V(xvnmaddmsp, XVNMADDMSP, 0xF0000648) \
+ /* VSX Vector Negative Multiply-Subtract Type-A Double-Precision */ \
+ V(xvnmsubadp, XVNMSUBADP, 0xF0000788) \
+ /* VSX Vector Negative Multiply-Subtract Type-A Single-Precision */ \
+ V(xvnmsubasp, XVNMSUBASP, 0xF0000688) \
+ /* VSX Vector Negative Multiply-Subtract Type-M Double-Precision */ \
+ V(xvnmsubmdp, XVNMSUBMDP, 0xF00007C8) \
+ /* VSX Vector Negative Multiply-Subtract Type-M Single-Precision */ \
+ V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8) \
+ /* VSX Vector Reciprocal Estimate Double-Precision */ \
+ V(xvredp, XVREDP, 0xF0000368) \
+ /* VSX Vector Reciprocal Estimate Single-Precision */ \
+ V(xvresp, XVRESP, 0xF0000268) \
+ /* VSX Vector Subtract Double-Precision */ \
+ V(xvsubdp, XVSUBDP, 0xF0000340) \
+ /* VSX Vector Subtract Single-Precision */ \
+ V(xvsubsp, XVSUBSP, 0xF0000240) \
+ /* VSX Vector Test for software Divide Double-Precision */ \
+ V(xvtdivdp, XVTDIVDP, 0xF00003E8) \
+ /* VSX Vector Test for software Divide Single-Precision */ \
+ V(xvtdivsp, XVTDIVSP, 0xF00002E8) \
+ /* VSX Logical AND */ \
+ V(xxland, XXLAND, 0xF0000410) \
+ /* VSX Logical AND with Complement */ \
+ V(xxlandc, XXLANDC, 0xF0000450) \
+ /* VSX Logical Equivalence */ \
+ V(xxleqv, XXLEQV, 0xF00005D0) \
+ /* VSX Logical NAND */ \
+ V(xxlnand, XXLNAND, 0xF0000590) \
+ /* VSX Logical NOR */ \
+ V(xxlnor, XXLNOR, 0xF0000510) \
+ /* VSX Logical OR */ \
+ V(xxlor, XXLOR, 0xF0000490) \
+ /* VSX Logical OR with Complement */ \
+ V(xxlorc, XXLORC, 0xF0000550) \
+ /* VSX Logical XOR */ \
+ V(xxlxor, XXLXOR, 0xF00004D0) \
+ /* VSX Merge High Word */ \
+ V(xxmrghw, XXMRGHW, 0xF0000090) \
+ /* VSX Merge Low Word */ \
+ V(xxmrglw, XXMRGLW, 0xF0000190) \
+ /* VSX Permute Doubleword Immediate */ \
+ V(xxpermdi, XXPERMDI, 0xF0000050) \
+ /* VSX Shift Left Double by Word Immediate */ \
+ V(xxsldwi, XXSLDWI, 0xF0000010) \
+ /* VSX Splat Word */ \
+ V(xxspltw, XXSPLTW, 0xF0000290)
+
+#define PPC_Z23_OPCODE_LIST(V) \
+ /* Decimal Quantize */ \
+ V(dqua, DQUA, 0xEC000006) \
+ /* Decimal Quantize Immediate */ \
+ V(dquai, DQUAI, 0xEC000086) \
+ /* Decimal Quantize Immediate Quad */ \
+ V(dquaiq, DQUAIQ, 0xFC000086) \
+ /* Decimal Quantize Quad */ \
+ V(dquaq, DQUAQ, 0xFC000006) \
+ /* Decimal Floating Round To FP Integer Without Inexact */ \
+ V(drintn, DRINTN, 0xEC0001C6) \
+ /* Decimal Floating Round To FP Integer Without Inexact Quad */ \
+ V(drintnq, DRINTNQ, 0xFC0001C6) \
+ /* Decimal Floating Round To FP Integer With Inexact */ \
+ V(drintx, DRINTX, 0xEC0000C6) \
+ /* Decimal Floating Round To FP Integer With Inexact Quad */ \
+ V(drintxq, DRINTXQ, 0xFC0000C6) \
+ /* Decimal Floating Reround */ \
+ V(drrnd, DRRND, 0xEC000046) \
+ /* Decimal Floating Reround Quad */ \
+ V(drrndq, DRRNDQ, 0xFC000046)
+
+#define PPC_Z22_OPCODE_LIST(V) \
+ /* Decimal Floating Shift Coefficient Left Immediate */ \
+ V(dscli, DSCLI, 0xEC000084) \
+ /* Decimal Floating Shift Coefficient Left Immediate Quad */ \
+ V(dscliq, DSCLIQ, 0xFC000084) \
+ /* Decimal Floating Shift Coefficient Right Immediate */ \
+ V(dscri, DSCRI, 0xEC0000C4) \
+ /* Decimal Floating Shift Coefficient Right Immediate Quad */ \
+ V(dscriq, DSCRIQ, 0xFC0000C4) \
+ /* Decimal Floating Test Data Class */ \
+ V(dtstdc, DTSTDC, 0xEC000184) \
+ /* Decimal Floating Test Data Class Quad */ \
+ V(dtstdcq, DTSTDCQ, 0xFC000184) \
+ /* Decimal Floating Test Data Group */ \
+ V(dtstdg, DTSTDG, 0xEC0001C4) \
+ /* Decimal Floating Test Data Group Quad */ \
+ V(dtstdgq, DTSTDGQ, 0xFC0001C4)
+
+#define PPC_XX2_OPCODE_LIST(V) \
+ /* Move To VSR Doubleword */ \
+ V(mtvsrd, MTVSRD, 0x7C000166) \
+ /* Move To VSR Word Algebraic */ \
+ V(mtvsrwa, MTVSRWA, 0x7C0001A6) \
+ /* Move To VSR Word and Zero */ \
+ V(mtvsrwz, MTVSRWZ, 0x7C0001E6) \
+ /* VSX Scalar Absolute Value Double-Precision */ \
+ V(xsabsdp, XSABSDP, 0xF0000564) \
+ /* VSX Scalar Convert Double-Precision to Single-Precision */ \
+ V(xscvdpsp, XSCVDPSP, 0xF0000424) \
+ /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
+ /* signalling */ \
+ V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
+ /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \
+ /* Saturate */ \
+ V(xscvdpsxds, XSCVDPSXDS, 0xF0000560) \
+ /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xscvdpsxws, XSCVDPSXWS, 0xF0000160) \
+ /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point */ \
+ /* Doubleword Saturate */ \
+ V(xscvdpuxds, XSCVDPUXDS, 0xF0000520) \
+ /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xscvdpuxws, XSCVDPUXWS, 0xF0000120) \
+ /* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */ \
+ V(xscvspdp, XSCVSPDP, 0xF0000524) \
+ /* Scalar Convert Single-Precision to Double-Precision format Non- */ \
+ /* signalling */ \
+ V(xscvspdpn, XSCVSPDPN, 0xF000052C) \
+ /* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \
+ V(xscvsxddp, XSCVSXDDP, 0xF00005E0) \
+ /* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \
+ V(xscvsxdsp, XSCVSXDSP, 0xF00004E0) \
+ /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Double- */ \
+ /* Precision */ \
+ V(xscvuxddp, XSCVUXDDP, 0xF00005A0) \
+ /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Single- */ \
+ /* Precision */ \
+ V(xscvuxdsp, XSCVUXDSP, 0xF00004A0) \
+ /* VSX Scalar Negative Absolute Value Double-Precision */ \
+ V(xsnabsdp, XSNABSDP, 0xF00005A4) \
+ /* VSX Scalar Negate Double-Precision */ \
+ V(xsnegdp, XSNEGDP, 0xF00005E4) \
+ /* VSX Scalar Round to Double-Precision Integer */ \
+ V(xsrdpi, XSRDPI, 0xF0000124) \
+ /* VSX Scalar Round to Double-Precision Integer using Current rounding */ \
+ /* mode */ \
+ V(xsrdpic, XSRDPIC, 0xF00001AC) \
+ /* VSX Scalar Round to Double-Precision Integer toward -Infinity */ \
+ V(xsrdpim, XSRDPIM, 0xF00001E4) \
+ /* VSX Scalar Round to Double-Precision Integer toward +Infinity */ \
+ V(xsrdpip, XSRDPIP, 0xF00001A4) \
+ /* VSX Scalar Round to Double-Precision Integer toward Zero */ \
+ V(xsrdpiz, XSRDPIZ, 0xF0000164) \
+ /* VSX Scalar Round to Single-Precision */ \
+ V(xsrsp, XSRSP, 0xF0000464) \
+ /* VSX Scalar Reciprocal Square Root Estimate Double-Precision */ \
+ V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128) \
+ /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */ \
+ V(xsrsqrtesp, XSRSQRTESP, 0xF0000028) \
+ /* VSX Scalar Square Root Double-Precision */ \
+ V(xssqrtdp, XSSQRTDP, 0xF000012C) \
+ /* VSX Scalar Square Root Single-Precision */ \
+ V(xssqrtsp, XSSQRTSP, 0xF000002C) \
+ /* VSX Scalar Test for software Square Root Double-Precision */ \
+ V(xstsqrtdp, XSTSQRTDP, 0xF00001A8) \
+ /* VSX Vector Absolute Value Double-Precision */ \
+ V(xvabsdp, XVABSDP, 0xF0000764) \
+ /* VSX Vector Absolute Value Single-Precision */ \
+ V(xvabssp, XVABSSP, 0xF0000664) \
+ /* VSX Vector Convert Double-Precision to Single-Precision */ \
+ V(xvcvdpsp, XVCVDPSP, 0xF0000624) \
+ /* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \
+ /* Saturate */ \
+ V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760) \
+ /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360) \
+ /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */ \
+ /* Doubleword Saturate */ \
+ V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720) \
+ /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320) \
+ /* VSX Vector Convert Single-Precision to Double-Precision */ \
+ V(xvcvspdp, XVCVSPDP, 0xF0000724) \
+ /* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \
+ /* Saturate */ \
+ V(xvcvspsxds, XVCVSPSXDS, 0xF0000660) \
+ /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \
+ /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */ \
+ /* Doubleword Saturate */ \
+ V(xvcvspuxds, XVCVSPUXDS, 0xF0000620) \
+ /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \
+ /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \
+ V(xvcvsxddp, XVCVSXDDP, 0xF00007E0) \
+ /* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \
+ V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0) \
+ /* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */ \
+ V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0) \
+ /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \
+ V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */ \
+ /* Precision */ \
+ V(xvcvuxddp, XVCVUXDDP, 0xF00007A0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */ \
+ /* Precision */ \
+ V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */ \
+ V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \
+ V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0) \
+ /* VSX Vector Negative Absolute Value Double-Precision */ \
+ V(xvnabsdp, XVNABSDP, 0xF00007A4) \
+ /* VSX Vector Negative Absolute Value Single-Precision */ \
+ V(xvnabssp, XVNABSSP, 0xF00006A4) \
+ /* VSX Vector Negate Double-Precision */ \
+ V(xvnegdp, XVNEGDP, 0xF00007E4) \
+ /* VSX Vector Negate Single-Precision */ \
+ V(xvnegsp, XVNEGSP, 0xF00006E4) \
+ /* VSX Vector Round to Double-Precision Integer */ \
+ V(xvrdpi, XVRDPI, 0xF0000324) \
+ /* VSX Vector Round to Double-Precision Integer using Current rounding */ \
+ /* mode */ \
+ V(xvrdpic, XVRDPIC, 0xF00003AC) \
+ /* VSX Vector Round to Double-Precision Integer toward -Infinity */ \
+ V(xvrdpim, XVRDPIM, 0xF00003E4) \
+ /* VSX Vector Round to Double-Precision Integer toward +Infinity */ \
+ V(xvrdpip, XVRDPIP, 0xF00003A4) \
+ /* VSX Vector Round to Double-Precision Integer toward Zero */ \
+ V(xvrdpiz, XVRDPIZ, 0xF0000364) \
+ /* VSX Vector Round to Single-Precision Integer */ \
+ V(xvrspi, XVRSPI, 0xF0000224) \
+ /* VSX Vector Round to Single-Precision Integer using Current rounding */ \
+ /* mode */ \
+ V(xvrspic, XVRSPIC, 0xF00002AC) \
+ /* VSX Vector Round to Single-Precision Integer toward -Infinity */ \
+ V(xvrspim, XVRSPIM, 0xF00002E4) \
+ /* VSX Vector Round to Single-Precision Integer toward +Infinity */ \
+ V(xvrspip, XVRSPIP, 0xF00002A4) \
+ /* VSX Vector Round to Single-Precision Integer toward Zero */ \
+ V(xvrspiz, XVRSPIZ, 0xF0000264) \
+ /* VSX Vector Reciprocal Square Root Estimate Double-Precision */ \
+ V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328) \
+ /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \
+ V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \
+ /* VSX Vector Square Root Double-Precision */ \
+ V(xvsqrtdp, XVSQRTDP, 0xF000032C) \
+ /* VSX Vector Square Root Single-Precision */ \
+ V(xvsqrtsp, XVSQRTSP, 0xF000022C) \
+ /* VSX Vector Test for software Square Root Double-Precision */ \
+ V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8) \
+ /* VSX Vector Test for software Square Root Single-Precision */ \
+ V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8)
+
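
The new-style tables store each instruction's complete 32-bit opcode pattern rather than field values shifted at the use site; the primary opcode still occupies the top six bits. An illustrative decode (the field accessors below are assumptions for this sketch, not the header's actual API):

#include <cstdint>
#include <cstdio>

typedef uint32_t Instr;

// Primary opcode in bits 31-26; for XX3 forms the extended opcode sits
// in bits 10-3 (cf. the old "Bits 10-3" XX3_OPCODE_LIST comment).
constexpr uint32_t PrimaryOpcode(Instr instr) { return instr >> 26; }
constexpr uint32_t Xx3ExtOpcode(Instr instr) { return (instr >> 3) & 0xFF; }

int main() {
  constexpr Instr kXsadddp = 0xF0000100;  // from PPC_XX3_OPCODE_LIST
  // 0xF0000100 >> 26 == 60, the old "EXT6 = 60 << 26" code set, and the
  // XX3 extended-opcode field holds 32, matching "xsadddp, XSADDDP,
  // 32 << 3" in the table this change replaces.
  std::printf("primary=%u ext=%u\n", PrimaryOpcode(kXsadddp),
              Xx3ExtOpcode(kXsadddp));  // prints: primary=60 ext=32
}
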
+#define PPC_EVX_OPCODE_LIST(V) \
+ /* Vector Load Double Word into Double Word by External PID Indexed */ \
+ V(evlddepx, EVLDDEPX, 0x7C00063E) \
+ /* Vector Store Double of Double by External PID Indexed */ \
+ V(evstddepx, EVSTDDEPX, 0x7C00073E) \
+ /* Bit Reversed Increment */ \
+ V(brinc, BRINC, 0x1000020F) \
+ /* Vector Absolute Value */ \
+ V(evabs, EVABS, 0x10000208) \
+ /* Vector Add Immediate Word */ \
+ V(evaddiw, EVADDIW, 0x10000202) \
+ /* Vector Add Signed, Modulo, Integer to Accumulator Word */ \
+ V(evaddsmiaaw, EVADDSMIAAW, 0x100004C9) \
+ /* Vector Add Signed, Saturate, Integer to Accumulator Word */ \
+ V(evaddssiaaw, EVADDSSIAAW, 0x100004C1) \
+ /* Vector Add Unsigned, Modulo, Integer to Accumulator Word */ \
+ V(evaddumiaaw, EVADDUMIAAW, 0x100004C8) \
+ /* Vector Add Unsigned, Saturate, Integer to Accumulator Word */ \
+ V(evaddusiaaw, EVADDUSIAAW, 0x100004C0) \
+ /* Vector Add Word */ \
+ V(evaddw, EVADDW, 0x10000200) \
+ /* Vector AND */ \
+ V(evand, EVAND, 0x10000211) \
+ /* Vector AND with Complement */ \
+ V(evandc, EVANDC, 0x10000212) \
+ /* Vector Compare Equal */ \
+ V(evcmpeq, EVCMPEQ, 0x10000234) \
+ /* Vector Compare Greater Than Signed */ \
+ V(evcmpgts, EVCMPGTS, 0x10000231) \
+ /* Vector Compare Greater Than Unsigned */ \
+ V(evcmpgtu, EVCMPGTU, 0x10000230) \
+ /* Vector Compare Less Than Signed */ \
+ V(evcmplts, EVCMPLTS, 0x10000233) \
+ /* Vector Compare Less Than Unsigned */ \
+ V(evcmpltu, EVCMPLTU, 0x10000232) \
+ /* Vector Count Leading Signed Bits Word */ \
+ V(evcntlsw, EVCNTLSW, 0x1000020E) \
+ /* Vector Count Leading Zeros Word */ \
+ V(evcntlzw, EVCNTLZW, 0x1000020D) \
+ /* Vector Divide Word Signed */ \
+ V(evdivws, EVDIVWS, 0x100004C6) \
+ /* Vector Divide Word Unsigned */ \
+ V(evdivwu, EVDIVWU, 0x100004C7) \
+ /* Vector Equivalent */ \
+ V(eveqv, EVEQV, 0x10000219) \
+ /* Vector Extend Sign Byte */ \
+ V(evextsb, EVEXTSB, 0x1000020A) \
+ /* Vector Extend Sign Half Word */ \
+ V(evextsh, EVEXTSH, 0x1000020B) \
+ /* Vector Load Double Word into Double Word */ \
+ V(evldd, EVLDD, 0x10000301) \
+ /* Vector Load Double Word into Double Word Indexed */ \
+ V(evlddx, EVLDDX, 0x10000300) \
+ /* Vector Load Double into Four Half Words */ \
+ V(evldh, EVLDH, 0x10000305) \
+ /* Vector Load Double into Four Half Words Indexed */ \
+ V(evldhx, EVLDHX, 0x10000304) \
+ /* Vector Load Double into Two Words */ \
+ V(evldw, EVLDW, 0x10000303) \
+ /* Vector Load Double into Two Words Indexed */ \
+ V(evldwx, EVLDWX, 0x10000302) \
+ /* Vector Load Half Word into Half Words Even and Splat */ \
+ V(evlhhesplat, EVLHHESPLAT, 0x10000309) \
+ /* Vector Load Half Word into Half Words Even and Splat Indexed */ \
+ V(evlhhesplatx, EVLHHESPLATX, 0x10000308) \
+ /* Vector Load Half Word into Half Word Odd Signed and Splat */ \
+ V(evlhhossplat, EVLHHOSSPLAT, 0x1000030F) \
+ /* Vector Load Half Word into Half Word Odd Signed and Splat Indexed */ \
+ V(evlhhossplatx, EVLHHOSSPLATX, 0x1000030E) \
+ /* Vector Load Half Word into Half Word Odd Unsigned and Splat */ \
+ V(evlhhousplat, EVLHHOUSPLAT, 0x1000030D) \
+ /* Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed */ \
+ V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C) \
+ /* Vector Load Word into Two Half Words Even */ \
+ V(evlwhe, EVLWHE, 0x10000311) \
+ /* Vector Load Word into Two Half Words Even Indexed */ \
+ V(evlwhex, EVLWHEX, 0x10000310) \
+ /* Vector Load Word into Two Half Words Odd Signed (with sign extension) */ \
+ V(evlwhos, EVLWHOS, 0x10000317) \
+ /* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */ \
+ /* extension) */ \
+ V(evlwhosx, EVLWHOSX, 0x10000316) \
+ /* Vector Load Word into Two Half Words Odd Unsigned (zero-extended) */ \
+ V(evlwhou, EVLWHOU, 0x10000315) \
+ /* Vector Load Word into Two Half Words Odd Unsigned Indexed (zero- */ \
+ /* extended) */ \
+ V(evlwhoux, EVLWHOUX, 0x10000314) \
+ /* Vector Load Word into Two Half Words and Splat */ \
+ V(evlwhsplat, EVLWHSPLAT, 0x1000031D) \
+ /* Vector Load Word into Two Half Words and Splat Indexed */ \
+ V(evlwhsplatx, EVLWHSPLATX, 0x1000031C) \
+ /* Vector Load Word into Word and Splat */ \
+ V(evlwwsplat, EVLWWSPLAT, 0x10000319) \
+ /* Vector Load Word into Word and Splat Indexed */ \
+ V(evlwwsplatx, EVLWWSPLATX, 0x10000318) \
+ /* Vector Merge High */ \
+ V(evmergehi, EVMERGEHI, 0x1000022C) \
+ /* Vector Merge High/Low */ \
+ V(evmergehilo, EVMERGEHILO, 0x1000022E) \
+ /* Vector Merge Low */ \
+ V(evmergelo, EVMERGELO, 0x1000022D) \
+ /* Vector Merge Low/High */ \
+ V(evmergelohi, EVMERGELOHI, 0x1000022F) \
+ /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
+ /* and Accumulate */ \
+ V(evmhegsmfaa, EVMHEGSMFAA, 0x1000052B) \
+ /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
+ /* and Accumulate Negative */ \
+ V(evmhegsmfan, EVMHEGSMFAN, 0x100005AB) \
+ /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
+ /* and Accumulate */ \
+ V(evmhegsmiaa, EVMHEGSMIAA, 0x10000529) \
+ /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
+ /* and Accumulate Negative */ \
+ V(evmhegsmian, EVMHEGSMIAN, 0x100005A9) \
+ /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
+ /* and Accumulate */ \
+ V(evmhegumiaa, EVMHEGUMIAA, 0x10000528) \
+ /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
+ /* and Accumulate Negative */ \
+ V(evmhegumian, EVMHEGUMIAN, 0x100005A8) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional */ \
+ V(evmhesmf, EVMHESMF, 0x1000040B) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional to */ \
+ /* Accumulator */ \
+ V(evmhesmfa, EVMHESMFA, 0x1000042B) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
+ /* Accumulate into Words */ \
+ V(evmhesmfaaw, EVMHESMFAAW, 0x1000050B) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhesmfanw, EVMHESMFANW, 0x1000058B) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Integer */ \
+ V(evmhesmi, EVMHESMI, 0x10000409) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Integer to */ \
+ /* Accumulator */ \
+ V(evmhesmia, EVMHESMIA, 0x10000429) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhesmiaaw, EVMHESMIAAW, 0x10000509) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhesmianw, EVMHESMIANW, 0x10000589) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional */ \
+ V(evmhessf, EVMHESSF, 0x10000403) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional to */ \
+ /* Accumulator */ \
+ V(evmhessfa, EVMHESSFA, 0x10000423) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
+ /* Accumulate into Words */ \
+ V(evmhessfaaw, EVMHESSFAAW, 0x10000503) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhessfanw, EVMHESSFANW, 0x10000583) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhessiaaw, EVMHESSIAAW, 0x10000501) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhessianw, EVMHESSIANW, 0x10000581) \
+ /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer */ \
+ V(evmheumi, EVMHEUMI, 0x10000408) \
+ /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer to */ \
+ /* Accumulator */ \
+ V(evmheumia, EVMHEUMIA, 0x10000428) \
+ /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmheumiaaw, EVMHEUMIAAW, 0x10000508) \
+ /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmheumianw, EVMHEUMIANW, 0x10000588) \
+ /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmheusiaaw, EVMHEUSIAAW, 0x10000500) \
+ /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmheusianw, EVMHEUSIANW, 0x10000580) \
+ /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
+ /* and Accumulate */ \
+ V(evmhogsmfaa, EVMHOGSMFAA, 0x1000052F) \
+ /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
+ /* and Accumulate Negative */ \
+ V(evmhogsmfan, EVMHOGSMFAN, 0x100005AF) \
+ /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer, */ \
+ /* and Accumulate */ \
+ V(evmhogsmiaa, EVMHOGSMIAA, 0x1000052D) \
+ /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer and */ \
+ /* Accumulate Negative */ \
+ V(evmhogsmian, EVMHOGSMIAN, 0x100005AD) \
+ /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
+ /* and Accumulate */ \
+ V(evmhogumiaa, EVMHOGUMIAA, 0x1000052C) \
+ /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
+ /* and Accumulate Negative */ \
+ V(evmhogumian, EVMHOGUMIAN, 0x100005AC) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional */ \
+ V(evmhosmf, EVMHOSMF, 0x1000040F) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional to */ \
+ /* Accumulator */ \
+ V(evmhosmfa, EVMHOSMFA, 0x1000042F) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
+ /* Accumulate into Words */ \
+ V(evmhosmfaaw, EVMHOSMFAAW, 0x1000050F) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhosmfanw, EVMHOSMFANW, 0x1000058F) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer */ \
+ V(evmhosmi, EVMHOSMI, 0x1000040D) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer to */ \
+ /* Accumulator */ \
+ V(evmhosmia, EVMHOSMIA, 0x1000042D) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhosmiaaw, EVMHOSMIAAW, 0x1000050D) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhosmianw, EVMHOSMIANW, 0x1000058D) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional */ \
+ V(evmhossf, EVMHOSSF, 0x10000407) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional to */ \
+ /* Accumulator */ \
+ V(evmhossfa, EVMHOSSFA, 0x10000427) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
+ /* Accumulate into Words */ \
+ V(evmhossfaaw, EVMHOSSFAAW, 0x10000507) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhossfanw, EVMHOSSFANW, 0x10000587) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhossiaaw, EVMHOSSIAAW, 0x10000505) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhossianw, EVMHOSSIANW, 0x10000585) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer */ \
+ V(evmhoumi, EVMHOUMI, 0x1000040C) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer to */ \
+ /* Accumulator */ \
+ V(evmhoumia, EVMHOUMIA, 0x1000042C) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhoumiaaw, EVMHOUMIAAW, 0x1000050C) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhoumianw, EVMHOUMIANW, 0x1000058C) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhousiaaw, EVMHOUSIAAW, 0x10000504) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhousianw, EVMHOUSIANW, 0x10000584) \
+ /* Initialize Accumulator */ \
+ V(evmra, EVMRA, 0x100004C4) \
+ /* Vector Multiply Word High Signed, Modulo, Fractional */ \
+ V(evmwhsmf, EVMWHSMF, 0x1000044F) \
+ /* Vector Multiply Word High Signed, Modulo, Fractional to Accumulator */ \
+ V(evmwhsmfa, EVMWHSMFA, 0x1000046F) \
+ /* Vector Multiply Word High Signed, Modulo, Integer */ \
+ V(evmwhsmi, EVMWHSMI, 0x1000044D) \
+ /* Vector Multiply Word High Signed, Modulo, Integer to Accumulator */ \
+ V(evmwhsmia, EVMWHSMIA, 0x1000046D) \
+ /* Vector Multiply Word High Signed, Saturate, Fractional */ \
+ V(evmwhssf, EVMWHSSF, 0x10000447) \
+ /* Vector Multiply Word High Signed, Saturate, Fractional to Accumulator */ \
+ V(evmwhssfa, EVMWHSSFA, 0x10000467) \
+ /* Vector Multiply Word High Unsigned, Modulo, Integer */ \
+ V(evmwhumi, EVMWHUMI, 0x1000044C) \
+ /* Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator */ \
+ V(evmwhumia, EVMWHUMIA, 0x1000046C) \
+ /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate in */ \
+ /* Words */ \
+ V(evmwlsmiaaw, EVMWLSMIAAW, 0x10000549) \
+ /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate */ \
+ /* Negative in Words */ \
+ V(evmwlsmianw, EVMWLSMIANW, 0x100005C9) \
+ /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate in */ \
+ /* Words */ \
+ V(evmwlssiaaw, EVMWLSSIAAW, 0x10000541) \
+ /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate */ \
+ /* Negative in Words */ \
+ V(evmwlssianw, EVMWLSSIANW, 0x100005C1) \
+ /* Vector Multiply Word Low Unsigned, Modulo, Integer */ \
+ V(evmwlumi, EVMWLUMI, 0x10000448) \
+ /* Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator */ \
+ V(evmwlumia, EVMWLUMIA, 0x10000468) \
+ /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate in */ \
+ /* Words */ \
+ V(evmwlumiaaw, EVMWLUMIAAW, 0x10000548) \
+ /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate */ \
+ /* Negative in Words */ \
+ V(evmwlumianw, EVMWLUMIANW, 0x100005C8) \
+ /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
+ /* in Words */ \
+ V(evmwlusiaaw, EVMWLUSIAAW, 0x10000540) \
+ /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
+ /* Negative in Words */ \
+ V(evmwlusianw, EVMWLUSIANW, 0x100005C0) \
+ /* Vector Multiply Word Signed, Modulo, Fractional */ \
+ V(evmwsmf, EVMWSMF, 0x1000045B) \
+ /* Vector Multiply Word Signed, Modulo, Fractional to Accumulator */ \
+ V(evmwsmfa, EVMWSMFA, 0x1000047B) \
+ /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
+ V(evmwsmfaa, EVMWSMFAA, 0x1000055B) \
+ /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
+ /* Negative */ \
+ V(evmwsmfan, EVMWSMFAN, 0x100005DB) \
+ /* Vector Multiply Word Signed, Modulo, Integer */ \
+ V(evmwsmi, EVMWSMI, 0x10000459) \
+ /* Vector Multiply Word Signed, Modulo, Integer to Accumulator */ \
+ V(evmwsmia, EVMWSMIA, 0x10000479) \
+ /* Vector Multiply Word Signed, Modulo, Integer and Accumulate */ \
+ V(evmwsmiaa, EVMWSMIAA, 0x10000559) \
+ /* Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative */ \
+ V(evmwsmian, EVMWSMIAN, 0x100005D9) \
+ /* Vector Multiply Word Signed, Saturate, Fractional */ \
+ V(evmwssf, EVMWSSF, 0x10000453) \
+ /* Vector Multiply Word Signed, Saturate, Fractional to Accumulator */ \
+ V(evmwssfa, EVMWSSFA, 0x10000473) \
+ /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
+ V(evmwssfaa, EVMWSSFAA, 0x10000553) \
+ /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
+ /* Negative */ \
+ V(evmwssfan, EVMWSSFAN, 0x100005D3) \
+ /* Vector Multiply Word Unsigned, Modulo, Integer */ \
+ V(evmwumi, EVMWUMI, 0x10000458) \
+ /* Vector Multiply Word Unsigned, Modulo, Integer to Accumulator */ \
+ V(evmwumia, EVMWUMIA, 0x10000478) \
+ /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
+ V(evmwumiaa, EVMWUMIAA, 0x10000558) \
+ /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
+ /* Negative */ \
+ V(evmwumian, EVMWUMIAN, 0x100005D8) \
+ /* Vector NAND */ \
+ V(evnand, EVNAND, 0x1000021E) \
+ /* Vector Negate */ \
+ V(evneg, EVNEG, 0x10000209) \
+ /* Vector NOR */ \
+ V(evnor, EVNOR, 0x10000218) \
+ /* Vector OR */ \
+ V(evor, EVOR, 0x10000217) \
+ /* Vector OR with Complement */ \
+ V(evorc, EVORC, 0x1000021B) \
+ /* Vector Rotate Left Word */ \
+ V(evrlw, EVRLW, 0x10000228) \
+ /* Vector Rotate Left Word Immediate */ \
+ V(evrlwi, EVRLWI, 0x1000022A) \
+ /* Vector Round Word */ \
+ V(evrndw, EVRNDW, 0x1000020C) \
+ /* Vector Shift Left Word */ \
+ V(evslw, EVSLW, 0x10000224) \
+ /* Vector Shift Left Word Immediate */ \
+ V(evslwi, EVSLWI, 0x10000226) \
+ /* Vector Splat Fractional Immediate */ \
+ V(evsplatfi, EVSPLATFI, 0x1000022B) \
+ /* Vector Splat Immediate */ \
+ V(evsplati, EVSPLATI, 0x10000229) \
+ /* Vector Shift Right Word Immediate Signed */ \
+ V(evsrwis, EVSRWIS, 0x10000223) \
+ /* Vector Shift Right Word Immediate Unsigned */ \
+ V(evsrwiu, EVSRWIU, 0x10000222) \
+ /* Vector Shift Right Word Signed */ \
+ V(evsrws, EVSRWS, 0x10000221) \
+ /* Vector Shift Right Word Unsigned */ \
+ V(evsrwu, EVSRWU, 0x10000220) \
+ /* Vector Store Double of Double */ \
+ V(evstdd, EVSTDD, 0x10000321) \
+ /* Vector Store Double of Double Indexed */ \
+ V(evstddx, EVSTDDX, 0x10000320) \
+ /* Vector Store Double of Four Half Words */ \
+ V(evstdh, EVSTDH, 0x10000325) \
+ /* Vector Store Double of Four Half Words Indexed */ \
+ V(evstdhx, EVSTDHX, 0x10000324) \
+ /* Vector Store Double of Two Words */ \
+ V(evstdw, EVSTDW, 0x10000323) \
+ /* Vector Store Double of Two Words Indexed */ \
+ V(evstdwx, EVSTDWX, 0x10000322) \
+ /* Vector Store Word of Two Half Words from Even */ \
+ V(evstwhe, EVSTWHE, 0x10000331) \
+ /* Vector Store Word of Two Half Words from Even Indexed */ \
+ V(evstwhex, EVSTWHEX, 0x10000330) \
+ /* Vector Store Word of Two Half Words from Odd */ \
+ V(evstwho, EVSTWHO, 0x10000335) \
+ /* Vector Store Word of Two Half Words from Odd Indexed */ \
+ V(evstwhox, EVSTWHOX, 0x10000334) \
+ /* Vector Store Word of Word from Even */ \
+ V(evstwwe, EVSTWWE, 0x10000339) \
+ /* Vector Store Word of Word from Even Indexed */ \
+ V(evstwwex, EVSTWWEX, 0x10000338) \
+ /* Vector Store Word of Word from Odd */ \
+ V(evstwwo, EVSTWWO, 0x1000033D) \
+ /* Vector Store Word of Word from Odd Indexed */ \
+ V(evstwwox, EVSTWWOX, 0x1000033C) \
+ /* Vector Subtract Signed, Modulo, Integer to Accumulator Word */ \
+ V(evsubfsmiaaw, EVSUBFSMIAAW, 0x100004CB) \
+ /* Vector Subtract Signed, Saturate, Integer to Accumulator Word */ \
+ V(evsubfssiaaw, EVSUBFSSIAAW, 0x100004C3) \
+ /* Vector Subtract Unsigned, Modulo, Integer to Accumulator Word */ \
+ V(evsubfumiaaw, EVSUBFUMIAAW, 0x100004CA) \
+ /* Vector Subtract Unsigned, Saturate, Integer to Accumulator Word */ \
+ V(evsubfusiaaw, EVSUBFUSIAAW, 0x100004C2) \
+ /* Vector Subtract from Word */ \
+ V(evsubfw, EVSUBFW, 0x10000204) \
+ /* Vector Subtract Immediate from Word */ \
+ V(evsubifw, EVSUBIFW, 0x10000206) \
+ /* Vector XOR */ \
+ V(evxor, EVXOR, 0x10000216) \
+ /* Floating-Point Double-Precision Absolute Value */ \
+ V(efdabs, EFDABS, 0x100002E4) \
+ /* Floating-Point Double-Precision Add */ \
+ V(efdadd, EFDADD, 0x100002E0) \
+ /* Floating-Point Double-Precision Convert from Single-Precision */ \
+ V(efdcfs, EFDCFS, 0x100002EF) \
+ /* Convert Floating-Point Double-Precision from Signed Fraction */ \
+ V(efdcfsf, EFDCFSF, 0x100002F3) \
+ /* Convert Floating-Point Double-Precision from Signed Integer */ \
+ V(efdcfsi, EFDCFSI, 0x100002F1) \
+ /* Convert Floating-Point Double-Precision from Signed Integer */ \
+ /* Doubleword */ \
+ V(efdcfsid, EFDCFSID, 0x100002E3) \
+ /* Convert Floating-Point Double-Precision from Unsigned Fraction */ \
+ V(efdcfuf, EFDCFUF, 0x100002F2) \
+ /* Convert Floating-Point Double-Precision from Unsigned Integer */ \
+ V(efdcfui, EFDCFUI, 0x100002F0) \
+ /* Convert Floating-Point Double-Precision from Unsigned Integer */ \
+ /* Doubleword */ \
+ V(efdcfuid, EFDCFUID, 0x100002E2) \
+ /* Floating-Point Double-Precision Compare Equal */ \
+ V(efdcmpeq, EFDCMPEQ, 0x100002EE) \
+ /* Floating-Point Double-Precision Compare Greater Than */ \
+ V(efdcmpgt, EFDCMPGT, 0x100002EC) \
+ /* Floating-Point Double-Precision Compare Less Than */ \
+ V(efdcmplt, EFDCMPLT, 0x100002ED) \
+ /* Convert Floating-Point Double-Precision to Signed Fraction */ \
+ V(efdctsf, EFDCTSF, 0x100002F7) \
+ /* Convert Floating-Point Double-Precision to Signed Integer */ \
+ V(efdctsi, EFDCTSI, 0x100002F5) \
+ /* Convert Floating-Point Double-Precision to Signed Integer Doubleword */ \
+ /* with Round toward Zero */ \
+ V(efdctsidz, EFDCTSIDZ, 0x100002EB) \
+ /* Convert Floating-Point Double-Precision to Signed Integer with Round */ \
+ /* toward Zero */ \
+ V(efdctsiz, EFDCTSIZ, 0x100002FA) \
+ /* Convert Floating-Point Double-Precision to Unsigned Fraction */ \
+ V(efdctuf, EFDCTUF, 0x100002F6) \
+ /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
+ V(efdctui, EFDCTUI, 0x100002F4) \
+ /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
+ /* Doubleword with Round toward Zero */ \
+ V(efdctuidz, EFDCTUIDZ, 0x100002EA) \
+ /* Convert Floating-Point Double-Precision to Unsigned Integer with */ \
+ /* Round toward Zero */ \
+ V(efdctuiz, EFDCTUIZ, 0x100002F8) \
+ /* Floating-Point Double-Precision Divide */ \
+ V(efddiv, EFDDIV, 0x100002E9) \
+ /* Floating-Point Double-Precision Multiply */ \
+ V(efdmul, EFDMUL, 0x100002E8) \
+ /* Floating-Point Double-Precision Negative Absolute Value */ \
+ V(efdnabs, EFDNABS, 0x100002E5) \
+ /* Floating-Point Double-Precision Negate */ \
+ V(efdneg, EFDNEG, 0x100002E6) \
+ /* Floating-Point Double-Precision Subtract */ \
+ V(efdsub, EFDSUB, 0x100002E1) \
+ /* Floating-Point Double-Precision Test Equal */ \
+ V(efdtsteq, EFDTSTEQ, 0x100002FE) \
+ /* Floating-Point Double-Precision Test Greater Than */ \
+ V(efdtstgt, EFDTSTGT, 0x100002FC) \
+ /* Floating-Point Double-Precision Test Less Than */ \
+ V(efdtstlt, EFDTSTLT, 0x100002FD) \
+ /* Floating-Point Single-Precision Convert from Double-Precision */ \
+ V(efscfd, EFSCFD, 0x100002CF) \
+ /* Floating-Point Absolute Value */ \
+ V(efsabs, EFSABS, 0x100002C4) \
+ /* Floating-Point Add */ \
+ V(efsadd, EFSADD, 0x100002C0) \
+ /* Convert Floating-Point from Signed Fraction */ \
+ V(efscfsf, EFSCFSF, 0x100002D3) \
+ /* Convert Floating-Point from Signed Integer */ \
+ V(efscfsi, EFSCFSI, 0x100002D1) \
+ /* Convert Floating-Point from Unsigned Fraction */ \
+ V(efscfuf, EFSCFUF, 0x100002D2) \
+ /* Convert Floating-Point from Unsigned Integer */ \
+ V(efscfui, EFSCFUI, 0x100002D0) \
+ /* Floating-Point Compare Equal */ \
+ V(efscmpeq, EFSCMPEQ, 0x100002CE) \
+ /* Floating-Point Compare Greater Than */ \
+ V(efscmpgt, EFSCMPGT, 0x100002CC) \
+ /* Floating-Point Compare Less Than */ \
+ V(efscmplt, EFSCMPLT, 0x100002CD) \
+ /* Convert Floating-Point to Signed Fraction */ \
+ V(efsctsf, EFSCTSF, 0x100002D7) \
+ /* Convert Floating-Point to Signed Integer */ \
+ V(efsctsi, EFSCTSI, 0x100002D5) \
+ /* Convert Floating-Point to Signed Integer with Round toward Zero */ \
+ V(efsctsiz, EFSCTSIZ, 0x100002DA) \
+ /* Convert Floating-Point to Unsigned Fraction */ \
+ V(efsctuf, EFSCTUF, 0x100002D6) \
+ /* Convert Floating-Point to Unsigned Integer */ \
+ V(efsctui, EFSCTUI, 0x100002D4) \
+ /* Convert Floating-Point to Unsigned Integer with Round toward Zero */ \
+ V(efsctuiz, EFSCTUIZ, 0x100002D8) \
+ /* Floating-Point Divide */ \
+ V(efsdiv, EFSDIV, 0x100002C9) \
+ /* Floating-Point Multiply */ \
+ V(efsmul, EFSMUL, 0x100002C8) \
+ /* Floating-Point Negative Absolute Value */ \
+ V(efsnabs, EFSNABS, 0x100002C5) \
+ /* Floating-Point Negate */ \
+ V(efsneg, EFSNEG, 0x100002C6) \
+ /* Floating-Point Subtract */ \
+ V(efssub, EFSSUB, 0x100002C1) \
+ /* Floating-Point Test Equal */ \
+ V(efststeq, EFSTSTEQ, 0x100002DE) \
+ /* Floating-Point Test Greater Than */ \
+ V(efststgt, EFSTSTGT, 0x100002DC) \
+ /* Floating-Point Test Less Than */ \
+ V(efststlt, EFSTSTLT, 0x100002DD) \
+ /* Vector Floating-Point Absolute Value */ \
+ V(evfsabs, EVFSABS, 0x10000284) \
+ /* Vector Floating-Point Add */ \
+ V(evfsadd, EVFSADD, 0x10000280) \
+ /* Vector Convert Floating-Point from Signed Fraction */ \
+ V(evfscfsf, EVFSCFSF, 0x10000293) \
+ /* Vector Convert Floating-Point from Signed Integer */ \
+ V(evfscfsi, EVFSCFSI, 0x10000291) \
+ /* Vector Convert Floating-Point from Unsigned Fraction */ \
+ V(evfscfuf, EVFSCFUF, 0x10000292) \
+ /* Vector Convert Floating-Point from Unsigned Integer */ \
+ V(evfscfui, EVFSCFUI, 0x10000290) \
+ /* Vector Floating-Point Compare Equal */ \
+ V(evfscmpeq, EVFSCMPEQ, 0x1000028E) \
+ /* Vector Floating-Point Compare Greater Than */ \
+ V(evfscmpgt, EVFSCMPGT, 0x1000028C) \
+ /* Vector Floating-Point Compare Less Than */ \
+ V(evfscmplt, EVFSCMPLT, 0x1000028D) \
+ /* Vector Convert Floating-Point to Signed Fraction */ \
+ V(evfsctsf, EVFSCTSF, 0x10000297) \
+ /* Vector Convert Floating-Point to Signed Integer */ \
+ V(evfsctsi, EVFSCTSI, 0x10000295) \
+ /* Vector Convert Floating-Point to Signed Integer with Round toward */ \
+ /* Zero */ \
+ V(evfsctsiz, EVFSCTSIZ, 0x1000029A) \
+ /* Vector Convert Floating-Point to Unsigned Fraction */ \
+ V(evfsctuf, EVFSCTUF, 0x10000296) \
+ /* Vector Convert Floating-Point to Unsigned Integer */ \
+ V(evfsctui, EVFSCTUI, 0x10000294) \
+ /* Vector Convert Floating-Point to Unsigned Integer with Round toward */ \
+ /* Zero */ \
+ V(evfsctuiz, EVFSCTUIZ, 0x10000298) \
+ /* Vector Floating-Point Divide */ \
+ V(evfsdiv, EVFSDIV, 0x10000289) \
+ /* Vector Floating-Point Multiply */ \
+ V(evfsmul, EVFSMUL, 0x10000288) \
+ /* Vector Floating-Point Negative Absolute Value */ \
+ V(evfsnabs, EVFSNABS, 0x10000285) \
+ /* Vector Floating-Point Negate */ \
+ V(evfsneg, EVFSNEG, 0x10000286) \
+ /* Vector Floating-Point Subtract */ \
+ V(evfssub, EVFSSUB, 0x10000281) \
+ /* Vector Floating-Point Test Equal */ \
+ V(evfststeq, EVFSTSTEQ, 0x1000029E) \
+ /* Vector Floating-Point Test Greater Than */ \
+ V(evfststgt, EVFSTSTGT, 0x1000029C) \
+ /* Vector Floating-Point Test Less Than */ \
+ V(evfststlt, EVFSTSTLT, 0x1000029D)
+
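Each of these PPC_*_OPCODE_LIST macros is an X-macro: every invocation hands a user-supplied macro V the triple (mnemonic, enum name, opcode value), so one table can be expanded into an enum, a disassembler name table, and so on. A minimal sketch of the pattern with a two-entry stand-in list (the DEMO_/DECLARE_ helper names are illustrative, not V8's):

#include <cstdint>
#include <cstdio>

// Stand-in list in the same shape as the lists above.
#define DEMO_OPCODE_LIST(V)       \
  V(vcmpbfp, VCMPBFP, 0x100003C6) \
  V(vcmpeqfp, VCMPEQFP, 0x100000C6)

// One expansion stamps out enum values...
#define DECLARE_ENUM(name, opcode_name, opcode_value) opcode_name = opcode_value,
enum DemoOpcode : uint32_t { DEMO_OPCODE_LIST(DECLARE_ENUM) };
#undef DECLARE_ENUM

// ...and a second expansion stamps out a printable name table.
#define DECLARE_NAME(name, opcode_name, opcode_value) #name,
static const char* const kDemoNames[] = {DEMO_OPCODE_LIST(DECLARE_NAME)};
#undef DECLARE_NAME

int main() {
  std::printf("%s = 0x%08X\n", kDemoNames[0], static_cast<unsigned>(VCMPBFP));
}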
+#define PPC_VC_OPCODE_LIST(V) \
+ /* Vector Compare Bounds Single-Precision */ \
+ V(vcmpbfp, VCMPBFP, 0x100003C6) \
+ /* Vector Compare Equal To Single-Precision */ \
+ V(vcmpeqfp, VCMPEQFP, 0x100000C6) \
+ /* Vector Compare Equal To Unsigned Byte */ \
+ V(vcmpequb, VCMPEQUB, 0x10000006) \
+ /* Vector Compare Equal To Unsigned Doubleword */ \
+ V(vcmpequd, VCMPEQUD, 0x100000C7) \
+ /* Vector Compare Equal To Unsigned Halfword */ \
+ V(vcmpequh, VCMPEQUH, 0x10000046) \
+ /* Vector Compare Equal To Unsigned Word */ \
+ V(vcmpequw, VCMPEQUW, 0x10000086) \
+ /* Vector Compare Greater Than or Equal To Single-Precision */ \
+ V(vcmpgefp, VCMPGEFP, 0x100001C6) \
+ /* Vector Compare Greater Than Single-Precision */ \
+ V(vcmpgtfp, VCMPGTFP, 0x100002C6) \
+ /* Vector Compare Greater Than Signed Byte */ \
+ V(vcmpgtsb, VCMPGTSB, 0x10000306) \
+ /* Vector Compare Greater Than Signed Doubleword */ \
+ V(vcmpgtsd, VCMPGTSD, 0x100003C7) \
+ /* Vector Compare Greater Than Signed Halfword */ \
+ V(vcmpgtsh, VCMPGTSH, 0x10000346) \
+ /* Vector Compare Greater Than Signed Word */ \
+ V(vcmpgtsw, VCMPGTSW, 0x10000386) \
+ /* Vector Compare Greater Than Unsigned Byte */ \
+ V(vcmpgtub, VCMPGTUB, 0x10000206) \
+ /* Vector Compare Greater Than Unsigned Doubleword */ \
+ V(vcmpgtud, VCMPGTUD, 0x100002C7) \
+ /* Vector Compare Greater Than Unsigned Halfword */ \
+ V(vcmpgtuh, VCMPGTUH, 0x10000246) \
+ /* Vector Compare Greater Than Unsigned Word */ \
+ V(vcmpgtuw, VCMPGTUW, 0x10000286)
+
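The VC-form values above pack the 6-bit primary opcode into the top bits and a 10-bit extended opcode into the low bits, with the three 5-bit vector register fields and the record bit (0x400) in between; masking an instruction word with 0xFC0003FF therefore recovers the listed constant. A hedged sketch of dispatching on that mask (kVCFormMask and DecodeVC are our names, not V8's):

#include <cstdint>
#include <cstdio>

// Assumed mask: primary opcode (top 6 bits) plus the 10-bit VC extended
// opcode; register fields and the record bit are cleared.
constexpr uint32_t kVCFormMask = 0xFC0003FF;

const char* DecodeVC(uint32_t instr) {
  switch (instr & kVCFormMask) {
    case 0x10000006: return "vcmpequb";
    case 0x10000046: return "vcmpequh";
    case 0x10000086: return "vcmpequw";
    default: return "unknown";
  }
}

int main() {
  // vcmpequw v0, v1, v2: VT<<21 | VA<<16 | VB<<11 over the base value.
  uint32_t instr = 0x10000086u | (0u << 21) | (1u << 16) | (2u << 11);
  std::printf("%s\n", DecodeVC(instr));  // prints "vcmpequw"
}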
+#define PPC_X_OPCODE_LIST(V) \
+ /* Bit Permute Doubleword */ \
+ V(bpermd, BPERMD, 0x7C0001F8) \
+ /* Count Leading Zeros Doubleword */ \
+ V(cntlzd, CNTLZDX, 0x7C000074) \
+ /* Extend Sign Word */ \
+ V(extsw, EXTSW, 0x7C0007B4) \
+ /* Load Doubleword And Reserve Indexed */ \
+ V(ldarx, LDARX, 0x7C0000A8) \
+ /* Load Doubleword Byte-Reverse Indexed */ \
+ V(ldbrx, LDBRX, 0x7C000428) \
+ /* Load Doubleword with Update Indexed */ \
+ V(ldux, LDUX, 0x7C00006A) \
+ /* Load Doubleword Indexed */ \
+ V(ldx, LDX, 0x7C00002A) \
+ /* Load Word Algebraic with Update Indexed */ \
+ V(lwaux, LWAUX, 0x7C0002EA) \
+ /* Load Word Algebraic Indexed */ \
+ V(lwax, LWAX, 0x7C0002AA) \
+ /* Modulo Signed Doubleword */ \
+ V(modsd, MODSD, 0x7C000612) \
+ /* Modulo Unsigned Doubleword */ \
+ V(modud, MODUD, 0x7C000212) \
+ /* Population Count Doubleword */ \
+ V(popcntd, POPCNTD, 0x7C0003F4) \
+ /* Parity Doubleword */ \
+ V(prtyd, PRTYD, 0x7C000174) \
+ /* Shift Left Doubleword */ \
+ V(sld, SLDX, 0x7C000036) \
+ /* Shift Right Algebraic Doubleword */ \
+ V(srad, SRAD, 0x7C000634) \
+ /* Shift Right Doubleword */ \
+ V(srd, SRDX, 0x7C000436) \
+ /* Store Doubleword Byte-Reverse Indexed */ \
+ V(stdbrx, STDBRX, 0x7C000528) \
+ /* Store Doubleword Conditional Indexed & record CR0 */ \
+ V(stdcx, STDCX, 0x7C0001AD) \
+ /* Store Doubleword with Update Indexed */ \
+ V(stdux, STDUX, 0x7C00016A) \
+ /* Store Doubleword Indexed */ \
+ V(stdx, STDX, 0x7C00012A) \
+ /* Trap Doubleword */ \
+ V(td, TD, 0x7C000088) \
+ /* AND */ \
+ V(andx, ANDX, 0x7C000038) \
+ /* AND with Complement */ \
+ V(andc, ANDCX, 0x7C000078) \
+ /* Branch Conditional to Branch Target Address Register */ \
+ V(bctar, BCTAR, 0x4C000460) \
+ /* Compare */ \
+ V(cmp, CMP, 0x7C000000) \
+ /* Compare Byte */ \
+ V(cmpb, CMPB, 0x7C0003F8) \
+ /* Compare Logical */ \
+ V(cmpl, CMPL, 0x7C000040) \
+ /* Count Leading Zeros Word */ \
+ V(cntlzw, CNTLZWX, 0x7C000034) \
+ /* Data Cache Block Flush */ \
+ V(dcbf, DCBF, 0x7C0000AC) \
+ /* Data Cache Block Store */ \
+ V(dcbst, DCBST, 0x7C00006C) \
+ /* Data Cache Block Touch */ \
+ V(dcbt, DCBT, 0x7C00022C) \
+ /* Data Cache Block Touch for Store */ \
+ V(dcbtst, DCBTST, 0x7C0001EC) \
+ /* Data Cache Block Zero */ \
+ V(dcbz, DCBZ, 0x7C0007EC) \
+ /* Equivalent */ \
+ V(eqv, EQV, 0x7C000238) \
+ /* Extend Sign Byte */ \
+ V(extsb, EXTSB, 0x7C000774) \
+ /* Extend Sign Halfword */ \
+ V(extsh, EXTSH, 0x7C000734) \
+ /* Instruction Cache Block Invalidate */ \
+ V(icbi, ICBI, 0x7C0007AC) \
+ /* Load Byte And Reserve Indexed */ \
+ V(lbarx, LBARX, 0x7C000068) \
+ /* Load Byte and Zero with Update Indexed */ \
+ V(lbzux, LBZUX, 0x7C0000EE) \
+ /* Load Byte and Zero Indexed */ \
+ V(lbzx, LBZX, 0x7C0000AE) \
+ /* Load Halfword And Reserve Indexed */ \
+ V(lharx, LHARX, 0x7C0000E8) \
+ /* Load Halfword Algebraic with Update Indexed */ \
+ V(lhaux, LHAUX, 0x7C0002EE) \
+ /* Load Halfword Algebraic Indexed */ \
+ V(lhax, LHAX, 0x7C0002AE) \
+ /* Load Halfword Byte-Reverse Indexed */ \
+ V(lhbrx, LHBRX, 0x7C00062C) \
+ /* Load Halfword and Zero with Update Indexed */ \
+ V(lhzux, LHZUX, 0x7C00026E) \
+ /* Load Halfword and Zero Indexed */ \
+ V(lhzx, LHZX, 0x7C00022E) \
+ /* Load Word and Reserve Indexed */ \
+ V(lwarx, LWARX, 0x7C000028) \
+ /* Load Word Byte-Reverse Indexed */ \
+ V(lwbrx, LWBRX, 0x7C00042C) \
+ /* Load Word and Zero with Update Indexed */ \
+ V(lwzux, LWZUX, 0x7C00006E) \
+ /* Load Word and Zero Indexed */ \
+ V(lwzx, LWZX, 0x7C00002E) \
+ /* Modulo Signed Word */ \
+ V(modsw, MODSW, 0x7C000616) \
+ /* Modulo Unsigned Word */ \
+ V(moduw, MODUW, 0x7C000216) \
+ /* NAND */ \
+ V(nand, NAND, 0x7C0003B8) \
+ /* NOR */ \
+ V(nor, NORX, 0x7C0000F8) \
+ /* OR */ \
+ V(orx, ORX, 0x7C000378) \
+ /* OR with Complement */ \
+ V(orc, ORC, 0x7C000338) \
+ /* Population Count Byte-wise */ \
+ V(popcntb, POPCNTB, 0x7C0000F4) \
+ /* Population Count Words */ \
+ V(popcntw, POPCNTW, 0x7C0002F4) \
+ /* Parity Word */ \
+ V(prtyw, PRTYW, 0x7C000134) \
+ /* Shift Left Word */ \
+ V(slw, SLWX, 0x7C000030) \
+ /* Shift Right Algebraic Word */ \
+ V(sraw, SRAW, 0x7C000630) \
+ /* Shift Right Algebraic Word Immediate */ \
+ V(srawi, SRAWIX, 0x7C000670) \
+ /* Shift Right Word */ \
+ V(srw, SRWX, 0x7C000430) \
+ /* Store Byte Conditional Indexed */ \
+ V(stbcx, STBCX, 0x7C00056D) \
+ /* Store Byte with Update Indexed */ \
+ V(stbux, STBUX, 0x7C0001EE) \
+ /* Store Byte Indexed */ \
+ V(stbx, STBX, 0x7C0001AE) \
+ /* Store Halfword Byte-Reverse Indexed */ \
+ V(sthbrx, STHBRX, 0x7C00072C) \
+ /* Store Halfword Conditional Indexed */ \
+ V(sthcx, STHCX, 0x7C0005AD) \
+ /* Store Halfword with Update Indexed */ \
+ V(sthux, STHUX, 0x7C00036E) \
+ /* Store Halfword Indexed */ \
+ V(sthx, STHX, 0x7C00032E) \
+ /* Store Word Byte-Reverse Indexed */ \
+ V(stwbrx, STWBRX, 0x7C00052C) \
+ /* Store Word Conditional Indexed & record CR0 */ \
+ V(stwcx, STWCX, 0x7C00012D) \
+ /* Store Word with Update Indexed */ \
+ V(stwux, STWUX, 0x7C00016E) \
+ /* Store Word Indexed */ \
+ V(stwx, STWX, 0x7C00012E) \
+ /* Synchronize */ \
+ V(sync, SYNC, 0x7C0004AC) \
+ /* Trap Word */ \
+ V(tw, TW, 0x7C000008) \
+ /* Executed No Operation */ \
+ V(xnop, XNOP, 0x68000000) \
+ /* XOR */ \
+ V(xorx, XORX, 0x7C000278) \
+ /* Convert Binary Coded Decimal To Declets */ \
+ V(cbcdtd, CBCDTD, 0x7C000274) \
+ /* Convert Declets To Binary Coded Decimal */ \
+ V(cdtbcd, CDTBCD, 0x7C000234) \
+ /* Decimal Floating Add */ \
+ V(dadd, DADD, 0xEC000004) \
+ /* Decimal Floating Add Quad */ \
+ V(daddq, DADDQ, 0xFC000004) \
+ /* Decimal Floating Convert From Fixed */ \
+ V(dcffix, DCFFIX, 0xEC000644) \
+ /* Decimal Floating Convert From Fixed Quad */ \
+ V(dcffixq, DCFFIXQ, 0xFC000644) \
+ /* Decimal Floating Compare Ordered */ \
+ V(dcmpo, DCMPO, 0xEC000104) \
+ /* Decimal Floating Compare Ordered Quad */ \
+ V(dcmpoq, DCMPOQ, 0xFC000104) \
+ /* Decimal Floating Compare Unordered */ \
+ V(dcmpu, DCMPU, 0xEC000504) \
+ /* Decimal Floating Compare Unordered Quad */ \
+ V(dcmpuq, DCMPUQ, 0xFC000504) \
+ /* Decimal Floating Convert To DFP Long */ \
+ V(dctdp, DCTDP, 0xEC000204) \
+ /* Decimal Floating Convert To Fixed */ \
+ V(dctfix, DCTFIX, 0xEC000244) \
+ /* Decimal Floating Convert To Fixed Quad */ \
+ V(dctfixq, DCTFIXQ, 0xFC000244) \
+ /* Decimal Floating Convert To DFP Extended */ \
+ V(dctqpq, DCTQPQ, 0xFC000204) \
+ /* Decimal Floating Decode DPD To BCD */ \
+ V(ddedpd, DDEDPD, 0xEC000284) \
+ /* Decimal Floating Decode DPD To BCD Quad */ \
+ V(ddedpdq, DDEDPDQ, 0xFC000284) \
+ /* Decimal Floating Divide */ \
+ V(ddiv, DDIV, 0xEC000444) \
+ /* Decimal Floating Divide Quad */ \
+ V(ddivq, DDIVQ, 0xFC000444) \
+ /* Decimal Floating Encode BCD To DPD */ \
+ V(denbcd, DENBCD, 0xEC000684) \
+ /* Decimal Floating Encode BCD To DPD Quad */ \
+ V(denbcdq, DENBCDQ, 0xFC000684) \
+ /* Decimal Floating Insert Exponent */ \
+ V(diex, DIEX, 0xEC0006C4) \
+ /* Decimal Floating Insert Exponent Quad */ \
+ V(diexq, DIEXQ, 0xFC0006C4) \
+ /* Decimal Floating Multiply */ \
+ V(dmul, DMUL, 0xEC000044) \
+ /* Decimal Floating Multiply Quad */ \
+ V(dmulq, DMULQ, 0xFC000044) \
+ /* Decimal Floating Round To DFP Long */ \
+ V(drdpq, DRDPQ, 0xFC000604) \
+ /* Decimal Floating Round To DFP Short */ \
+ V(drsp, DRSP, 0xEC000604) \
+ /* Decimal Floating Subtract */ \
+ V(dsub, DSUB, 0xEC000404) \
+ /* Decimal Floating Subtract Quad */ \
+ V(dsubq, DSUBQ, 0xFC000404) \
+ /* Decimal Floating Test Exponent */ \
+ V(dtstex, DTSTEX, 0xEC000144) \
+ /* Decimal Floating Test Exponent Quad */ \
+ V(dtstexq, DTSTEXQ, 0xFC000144) \
+ /* Decimal Floating Test Significance */ \
+ V(dtstsf, DTSTSF, 0xEC000544) \
+ /* Decimal Floating Test Significance Quad */ \
+ V(dtstsfq, DTSTSFQ, 0xFC000544) \
+ /* Decimal Floating Extract Exponent */ \
+ V(dxex, DXEX, 0xEC0002C4) \
+ /* Decimal Floating Extract Exponent Quad */ \
+ V(dxexq, DXEXQ, 0xFC0002C4) \
+ /* Decorated Storage Notify */ \
+ V(dsn, DSN, 0x7C0003C6) \
+ /* Load Byte with Decoration Indexed */ \
+ V(lbdx, LBDX, 0x7C000406) \
+ /* Load Doubleword with Decoration Indexed */ \
+ V(lddx, LDDX, 0x7C0004C6) \
+ /* Load Floating Doubleword with Decoration Indexed */ \
+ V(lfddx, LFDDX, 0x7C000646) \
+ /* Load Halfword with Decoration Indexed */ \
+ V(lhdx, LHDX, 0x7C000446) \
+ /* Load Word with Decoration Indexed */ \
+ V(lwdx, LWDX, 0x7C000486) \
+ /* Store Byte with Decoration Indexed */ \
+ V(stbdx, STBDX, 0x7C000506) \
+ /* Store Doubleword with Decoration Indexed */ \
+ V(stddx, STDDX, 0x7C0005C6) \
+ /* Store Floating Doubleword with Decoration Indexed */ \
+ V(stfddx, STFDDX, 0x7C000746) \
+ /* Store Halfword with Decoration Indexed */ \
+ V(sthdx, STHDX, 0x7C000546) \
+ /* Store Word with Decoration Indexed */ \
+ V(stwdx, STWDX, 0x7C000586) \
+ /* Data Cache Block Allocate */ \
+ V(dcba, DCBA, 0x7C0005EC) \
+ /* Data Cache Block Invalidate */ \
+ V(dcbi, DCBI, 0x7C0003AC) \
+ /* Instruction Cache Block Touch */ \
+ V(icbt, ICBT, 0x7C00002C) \
+ /* Memory Barrier */ \
+ V(mbar, MBAR, 0x7C0006AC) \
+ /* Move to Condition Register from XER */ \
+ V(mcrxr, MCRXR, 0x7C000400) \
+ /* TLB Invalidate Local Indexed */ \
+ V(tlbilx, TLBILX, 0x7C000024) \
+ /* TLB Invalidate Virtual Address Indexed */ \
+ V(tlbivax, TLBIVAX, 0x7C000624) \
+ /* TLB Read Entry */ \
+ V(tlbre, TLBRE, 0x7C000764) \
+ /* TLB Search Indexed */ \
+ V(tlbsx, TLBSX, 0x7C000724) \
+ /* TLB Write Entry */ \
+ V(tlbwe, TLBWE, 0x7C0007A4) \
+ /* Write External Enable */ \
+ V(wrtee, WRTEE, 0x7C000106) \
+ /* Write External Enable Immediate */ \
+ V(wrteei, WRTEEI, 0x7C000146) \
+ /* Data Cache Read */ \
+ V(dcread, DCREAD, 0x7C00028C) \
+ /* Instruction Cache Read */ \
+ V(icread, ICREAD, 0x7C0007CC) \
+ /* Data Cache Invalidate */ \
+ V(dci, DCI, 0x7C00038C) \
+ /* Instruction Cache Invalidate */ \
+ V(ici, ICI, 0x7C00078C) \
+ /* Move From Device Control Register User Mode Indexed */ \
+ V(mfdcrux, MFDCRUX, 0x7C000246) \
+ /* Move From Device Control Register Indexed */ \
+ V(mfdcrx, MFDCRX, 0x7C000206) \
+ /* Move To Device Control Register User Mode Indexed */ \
+ V(mtdcrux, MTDCRUX, 0x7C000346) \
+ /* Move To Device Control Register Indexed */ \
+ V(mtdcrx, MTDCRX, 0x7C000306) \
+ /* Return From Debug Interrupt */ \
+ V(rfdi, RFDI, 0x4C00004E) \
+ /* Data Cache Block Flush by External PID */ \
+ V(dcbfep, DCBFEP, 0x7C0000FE) \
+ /* Data Cache Block Store by External PID */ \
+ V(dcbstep, DCBSTEP, 0x7C00007E) \
+ /* Data Cache Block Touch by External PID */ \
+ V(dcbtep, DCBTEP, 0x7C00027E) \
+ /* Data Cache Block Touch for Store by External PID */ \
+ V(dcbtstep, DCBTSTEP, 0x7C0001FE) \
+ /* Data Cache Block Zero by External PID */ \
+ V(dcbzep, DCBZEP, 0x7C0007FE) \
+ /* Instruction Cache Block Invalidate by External PID */ \
+ V(icbiep, ICBIEP, 0x7C0007BE) \
+ /* Load Byte and Zero by External PID Indexed */ \
+ V(lbepx, LBEPX, 0x7C0000BE) \
+ /* Load Floating-Point Double by External PID Indexed */ \
+ V(lfdepx, LFDEPX, 0x7C0004BE) \
+ /* Load Halfword and Zero by External PID Indexed */ \
+ V(lhepx, LHEPX, 0x7C00023E) \
+ /* Load Vector by External PID Indexed */ \
+ V(lvepx, LVEPX, 0x7C00024E) \
+ /* Load Vector by External PID Indexed Last */ \
+ V(lvepxl, LVEPXL, 0x7C00020E) \
+ /* Load Word and Zero by External PID Indexed */ \
+ V(lwepx, LWEPX, 0x7C00003E) \
+ /* Store Byte by External PID Indexed */ \
+ V(stbepx, STBEPX, 0x7C0001BE) \
+ /* Store Floating-Point Double by External PID Indexed */ \
+ V(stfdepx, STFDEPX, 0x7C0005BE) \
+ /* Store Halfword by External PID Indexed */ \
+ V(sthepx, STHEPX, 0x7C00033E) \
+ /* Store Vector by External PID Indexed */ \
+ V(stvepx, STVEPX, 0x7C00064E) \
+ /* Store Vector by External PID Indexed Last */ \
+ V(stvepxl, STVEPXL, 0x7C00060E) \
+ /* Store Word by External PID Indexed */ \
+ V(stwepx, STWEPX, 0x7C00013E) \
+ /* Load Doubleword by External PID Indexed */ \
+ V(ldepx, LDEPX, 0x7C00003A) \
+ /* Store Doubleword by External PID Indexed */ \
+ V(stdepx, STDEPX, 0x7C00013A) \
+ /* TLB Search and Reserve Indexed */ \
+ V(tlbsrx, TLBSRX, 0x7C0006A5) \
+ /* External Control In Word Indexed */ \
+ V(eciwx, ECIWX, 0x7C00026C) \
+ /* External Control Out Word Indexed */ \
+ V(ecowx, ECOWX, 0x7C00036C) \
+ /* Data Cache Block Lock Clear */ \
+ V(dcblc, DCBLC, 0x7C00030C) \
+ /* Data Cache Block Lock Query */ \
+ V(dcblq, DCBLQ, 0x7C00034D) \
+ /* Data Cache Block Touch and Lock Set */ \
+ V(dcbtls, DCBTLS, 0x7C00014C) \
+ /* Data Cache Block Touch for Store and Lock Set */ \
+ V(dcbtstls, DCBTSTLS, 0x7C00010C) \
+ /* Instruction Cache Block Lock Clear */ \
+ V(icblc, ICBLC, 0x7C0001CC) \
+ /* Instruction Cache Block Lock Query */ \
+ V(icblq, ICBLQ, 0x7C00018D) \
+ /* Instruction Cache Block Touch and Lock Set */ \
+ V(icbtls, ICBTLS, 0x7C0003CC) \
+ /* Floating Compare Ordered */ \
+ V(fcmpo, FCMPO, 0xFC000040) \
+ /* Floating Compare Unordered */ \
+ V(fcmpu, FCMPU, 0xFC000000) \
+ /* Floating Test for software Divide */ \
+ V(ftdiv, FTDIV, 0xFC000100) \
+ /* Floating Test for software Square Root */ \
+ V(ftsqrt, FTSQRT, 0xFC000140) \
+ /* Load Floating-Point Double with Update Indexed */ \
+ V(lfdux, LFDUX, 0x7C0004EE) \
+ /* Load Floating-Point Double Indexed */ \
+ V(lfdx, LFDX, 0x7C0004AE) \
+ /* Load Floating-Point as Integer Word Algebraic Indexed */ \
+ V(lfiwax, LFIWAX, 0x7C0006AE) \
+ /* Load Floating-Point as Integer Word and Zero Indexed */ \
+ V(lfiwzx, LFIWZX, 0x7C0006EE) \
+ /* Load Floating-Point Single with Update Indexed */ \
+ V(lfsux, LFSUX, 0x7C00046E) \
+ /* Load Floating-Point Single Indexed */ \
+ V(lfsx, LFSX, 0x7C00042E) \
+ /* Move To Condition Register from FPSCR */ \
+ V(mcrfs, MCRFS, 0xFC000080) \
+ /* Store Floating-Point Double with Update Indexed */ \
+ V(stfdux, STFDUX, 0x7C0005EE) \
+ /* Store Floating-Point Double Indexed */ \
+ V(stfdx, STFDX, 0x7C0005AE) \
+ /* Store Floating-Point as Integer Word Indexed */ \
+ V(stfiwx, STFIWX, 0x7C0007AE) \
+ /* Store Floating-Point Single with Update Indexed */ \
+ V(stfsux, STFSUX, 0x7C00056E) \
+ /* Store Floating-Point Single Indexed */ \
+ V(stfsx, STFSX, 0x7C00052E) \
+ /* Load Floating-Point Double Pair Indexed */ \
+ V(lfdpx, LFDPX, 0x7C00062E) \
+ /* Store Floating-Point Double Pair Indexed */ \
+ V(stfdpx, STFDPX, 0x7C00072E) \
+ /* Floating Absolute Value */ \
+ V(fabs, FABS, 0xFC000210) \
+ /* Floating Convert From Integer Doubleword */ \
+ V(fcfid, FCFID, 0xFC00069C) \
+ /* Floating Convert From Integer Doubleword Single */ \
+ V(fcfids, FCFIDS, 0xEC00069C) \
+ /* Floating Convert From Integer Doubleword Unsigned */ \
+ V(fcfidu, FCFIDU, 0xFC00079C) \
+ /* Floating Convert From Integer Doubleword Unsigned Single */ \
+ V(fcfidus, FCFIDUS, 0xEC00079C) \
+ /* Floating Copy Sign */ \
+ V(fcpsgn, FCPSGN, 0xFC000010) \
+ /* Floating Convert To Integer Doubleword */ \
+ V(fctid, FCTID, 0xFC00065C) \
+ /* Floating Convert To Integer Doubleword Unsigned */ \
+ V(fctidu, FCTIDU, 0xFC00075C) \
+ /* Floating Convert To Integer Doubleword Unsigned with round toward */ \
+ /* Zero */ \
+ V(fctiduz, FCTIDUZ, 0xFC00075E) \
+ /* Floating Convert To Integer Doubleword with round toward Zero */ \
+ V(fctidz, FCTIDZ, 0xFC00065E) \
+ /* Floating Convert To Integer Word */ \
+ V(fctiw, FCTIW, 0xFC00001C) \
+ /* Floating Convert To Integer Word Unsigned */ \
+ V(fctiwu, FCTIWU, 0xFC00011C) \
+ /* Floating Convert To Integer Word Unsigned with round toward Zero */ \
+ V(fctiwuz, FCTIWUZ, 0xFC00011E) \
+ /* Floating Convert To Integer Word with round to Zero */ \
+ V(fctiwz, FCTIWZ, 0xFC00001E) \
+ /* Floating Move Register */ \
+ V(fmr, FMR, 0xFC000090) \
+ /* Floating Negative Absolute Value */ \
+ V(fnabs, FNABS, 0xFC000110) \
+ /* Floating Negate */ \
+ V(fneg, FNEG, 0xFC000050) \
+ /* Floating Round to Single-Precision */ \
+ V(frsp, FRSP, 0xFC000018) \
+ /* Move From FPSCR */ \
+ V(mffs, MFFS, 0xFC00048E) \
+ /* Move To FPSCR Bit 0 */ \
+ V(mtfsb0, MTFSB0, 0xFC00008C) \
+ /* Move To FPSCR Bit 1 */ \
+ V(mtfsb1, MTFSB1, 0xFC00004C) \
+ /* Move To FPSCR Field Immediate */ \
+ V(mtfsfi, MTFSFI, 0xFC00010C) \
+ /* Floating Round To Integer Minus */ \
+ V(frim, FRIM, 0xFC0003D0) \
+ /* Floating Round To Integer Nearest */ \
+ V(frin, FRIN, 0xFC000310) \
+ /* Floating Round To Integer Plus */ \
+ V(frip, FRIP, 0xFC000390) \
+ /* Floating Round To Integer toward Zero */ \
+ V(friz, FRIZ, 0xFC000350) \
+ /* Multiply Cross Halfword to Word Signed */ \
+ V(mulchw, MULCHW, 0x10000150) \
+ /* Multiply Cross Halfword to Word Unsigned */ \
+ V(mulchwu, MULCHWU, 0x10000110) \
+ /* Multiply High Halfword to Word Signed */ \
+ V(mulhhw, MULHHW, 0x10000050) \
+ /* Multiply High Halfword to Word Unsigned */ \
+ V(mulhhwu, MULHHWU, 0x10000010) \
+ /* Multiply Low Halfword to Word Signed */ \
+ V(mullhw, MULLHW, 0x10000350) \
+ /* Multiply Low Halfword to Word Unsigned */ \
+ V(mullhwu, MULLHWU, 0x10000310) \
+ /* Determine Leftmost Zero Byte */ \
+ V(dlmzb, DLMZB, 0x7C00009C) \
+ /* Load Quadword And Reserve Indexed */ \
+ V(lqarx, LQARX, 0x7C000228) \
+ /* Store Quadword Conditional Indexed and record CR0 */ \
+ V(stqcx, STQCX, 0x7C00016D) \
+ /* Load String Word Immediate */ \
+ V(lswi, LSWI, 0x7C0004AA) \
+ /* Load String Word Indexed */ \
+ V(lswx, LSWX, 0x7C00042A) \
+ /* Store String Word Immediate */ \
+ V(stswi, STSWI, 0x7C0005AA) \
+ /* Store String Word Indexed */ \
+ V(stswx, STSWX, 0x7C00052A) \
+ /* Clear BHRB */ \
+ V(clrbhrb, CLRBHRB, 0x7C00035C) \
+ /* Enforce In-order Execution of I/O */ \
+ V(eieio, EIEIO, 0x7C0006AC) \
+ /* Load Byte and Zero Caching Inhibited Indexed */ \
+ V(lbzcix, LBZCIX, 0x7C0006AA) \
+ /* Load Doubleword Caching Inhibited Indexed */ \
+ V(ldcix, LDCIX, 0x7C0006EA) \
+ /* Load Halfword and Zero Caching Inhibited Indexed */ \
+ V(lhzcix, LHZCIX, 0x7C00066A) \
+ /* Load Word and Zero Caching Inhibited Indexed */ \
+ V(lwzcix, LWZCIX, 0x7C00062A) \
+ /* Move From Segment Register */ \
+ V(mfsr, MFSR, 0x7C0004A6) \
+ /* Move From Segment Register Indirect */ \
+ V(mfsrin, MFSRIN, 0x7C000526) \
+ /* Move To Machine State Register Doubleword */ \
+ V(mtmsrd, MTMSRD, 0x7C000164) \
+ /* Move To Split Little Endian */ \
+ V(mtsle, MTSLE, 0x7C000126) \
+ /* Move To Segment Register */ \
+ V(mtsr, MTSR, 0x7C0001A4) \
+ /* Move To Segment Register Indirect */ \
+ V(mtsrin, MTSRIN, 0x7C0001E4) \
+ /* SLB Find Entry ESID */ \
+ V(slbfee, SLBFEE, 0x7C0007A7) \
+ /* SLB Invalidate All */ \
+ V(slbia, SLBIA, 0x7C0003E4) \
+ /* SLB Invalidate Entry */ \
+ V(slbie, SLBIE, 0x7C000364) \
+ /* SLB Move From Entry ESID */ \
+ V(slbmfee, SLBMFEE, 0x7C000726) \
+ /* SLB Move From Entry VSID */ \
+ V(slbmfev, SLBMFEV, 0x7C0006A6) \
+ /* SLB Move To Entry */ \
+ V(slbmte, SLBMTE, 0x7C000324) \
+ /* Store Byte Caching Inhibited Indexed */ \
+ V(stbcix, STBCIX, 0x7C0007AA) \
+ /* Store Doubleword Caching Inhibited Indexed */ \
+ V(stdcix, STDCIX, 0x7C0007EA) \
+ /* Store Halfword and Zero Caching Inhibited Indexed */ \
+ V(sthcix, STHCIX, 0x7C00076A) \
+ /* Store Word and Zero Caching Inhibited Indexed */ \
+ V(stwcix, STWCIX, 0x7C00072A) \
+ /* TLB Invalidate All */ \
+ V(tlbia, TLBIA, 0x7C0002E4) \
+ /* TLB Invalidate Entry */ \
+ V(tlbie, TLBIE, 0x7C000264) \
+ /* TLB Invalidate Entry Local */ \
+ V(tlbiel, TLBIEL, 0x7C000224) \
+ /* Message Clear Privileged */ \
+ V(msgclrp, MSGCLRP, 0x7C00015C) \
+ /* Message Send Privileged */ \
+ V(msgsndp, MSGSNDP, 0x7C00011C) \
+ /* Message Clear */ \
+ V(msgclr, MSGCLR, 0x7C0001DC) \
+ /* Message Send */ \
+ V(msgsnd, MSGSND, 0x7C00019C) \
+ /* Move From Machine State Register */ \
+ V(mfmsr, MFMSR, 0x7C0000A6) \
+ /* Move To Machine State Register */ \
+ V(mtmsr, MTMSR, 0x7C000124) \
+ /* TLB Synchronize */ \
+ V(tlbsync, TLBSYNC, 0x7C00046C) \
+ /* Transaction Abort */ \
+ V(tabort, TABORT, 0x7C00071D) \
+ /* Transaction Abort Doubleword Conditional */ \
+ V(tabortdc, TABORTDC, 0x7C00065D) \
+ /* Transaction Abort Doubleword Conditional Immediate */ \
+ V(tabortdci, TABORTDCI, 0x7C0006DD) \
+ /* Transaction Abort Word Conditional */ \
+ V(tabortwc, TABORTWC, 0x7C00061D) \
+ /* Transaction Abort Word Conditional Immediate */ \
+ V(tabortwci, TABORTWCI, 0x7C00069D) \
+ /* Transaction Begin */ \
+ V(tbegin, TBEGIN, 0x7C00051D) \
+ /* Transaction Check */ \
+ V(tcheck, TCHECK, 0x7C00059C) \
+ /* Transaction End */ \
+ V(tend, TEND, 0x7C00055C) \
+ /* Transaction Recheckpoint */ \
+ V(trechkpt, TRECHKPT, 0x7C0007DD) \
+ /* Transaction Reclaim */ \
+ V(treclaim, TRECLAIM, 0x7C00075D) \
+ /* Transaction Suspend or Resume */ \
+ V(tsr, TSR, 0x7C0005DC) \
+ /* Load Vector Element Byte Indexed */ \
+ V(lvebx, LVEBX, 0x7C00000E) \
+ /* Load Vector Element Halfword Indexed */ \
+ V(lvehx, LVEHX, 0x7C00004E) \
+ /* Load Vector Element Word Indexed */ \
+ V(lvewx, LVEWX, 0x7C00008E) \
+ /* Load Vector for Shift Left */ \
+ V(lvsl, LVSL, 0x7C00000C) \
+ /* Load Vector for Shift Right */ \
+ V(lvsr, LVSR, 0x7C00004C) \
+ /* Load Vector Indexed */ \
+ V(lvx, LVX, 0x7C0000CE) \
+ /* Load Vector Indexed Last */ \
+ V(lvxl, LVXL, 0x7C0002CE) \
+ /* Store Vector Element Byte Indexed */ \
+ V(stvebx, STVEBX, 0x7C00010E) \
+ /* Store Vector Element Halfword Indexed */ \
+ V(stvehx, STVEHX, 0x7C00014E) \
+ /* Store Vector Element Word Indexed */ \
+ V(stvewx, STVEWX, 0x7C00018E) \
+ /* Store Vector Indexed */ \
+ V(stvx, STVX, 0x7C0001CE) \
+ /* Store Vector Indexed Last */ \
+ V(stvxl, STVXL, 0x7C0003CE) \
+ /* Vector Minimum Signed Doubleword */ \
+ V(vminsd, VMINSD, 0x100003C2) \
+ /* Floating Merge Even Word */ \
+ V(fmrgew, FMRGEW, 0xFC00078C) \
+ /* Floating Merge Odd Word */ \
+ V(fmrgow, FMRGOW, 0xFC00068C) \
+ /* Wait for Interrupt */ \
+ V(wait, WAIT, 0x7C00007C)
+
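The X-form constants above are simply primary_opcode << 26 | extended_opcode << 1, with the three 5-bit register fields and the Rc bit left zero. A few spot checks against the list, under that assumption (XForm is our helper name):

#include <cstdint>

// Assumed X-form layout: 6-bit primary opcode, three 5-bit register
// fields (zero here), 10-bit extended opcode, Rc bit.
constexpr uint32_t XForm(uint32_t primary, uint32_t xo) {
  return (primary << 26) | (xo << 1);
}

static_assert(XForm(31, 598) == 0x7C0004AC, "sync");
static_assert(XForm(31, 151) == 0x7C00012E, "stwx");
static_assert(XForm(31, 28) == 0x7C000038, "and");

int main() { return 0; }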
+#define PPC_EVS_OPCODE_LIST(V) \
+ /* Vector Select */ \
+ V(evsel, EVSEL, 0x10000278)
+
+#define PPC_DS_OPCODE_LIST(V) \
+ /* Load Doubleword */ \
+ V(ld, LD, 0xE8000000) \
+ /* Load Doubleword with Update */ \
+ V(ldu, LDU, 0xE8000001) \
+ /* Load Word Algebraic */ \
+ V(lwa, LWA, 0xE8000002) \
+ /* Store Doubleword */ \
+ V(std, STD, 0xF8000000) \
+ /* Store Doubleword with Update */ \
+ V(stdu, STDU, 0xF8000001) \
+ /* Load Floating-Point Double Pair */ \
+ V(lfdp, LFDP, 0xE4000000) \
+ /* Store Floating-Point Double Pair */ \
+ V(stfdp, STFDP, 0xF4000000) \
+ /* Store Quadword */ \
+ V(stq, STQ, 0xF8000002)
+
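In the DS form the 16-bit displacement must be a multiple of 4, which frees its two low bits for the extended opcode visible above (LD=0, LDU=1, LWA=2). A sketch of assembling one such instruction (DSForm is our name, not V8's):

#include <cstdint>
#include <cstdio>

// DS-form: RT | RA | word-aligned displacement; the displacement's two
// low bits instead carry the extended opcode already baked into `base`.
constexpr uint32_t DSForm(uint32_t base, uint32_t rt, uint32_t ra, int16_t d) {
  return base | (rt << 21) | (ra << 16) | (static_cast<uint16_t>(d) & 0xFFFC);
}

int main() {
  // ld r3, 16(r1) from the LD base value above.
  std::printf("0x%08X\n", DSForm(0xE8000000u, 3, 1, 16));  // 0xE8610010
}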
+#define PPC_D_OPCODE_LIST(V) \
+ /* Trap Doubleword Immediate */ \
+ V(tdi, TDI, 0x08000000) \
+ /* Add Immediate */ \
+ V(addi, ADDI, 0x38000000) \
+ /* Add Immediate Carrying */ \
+ V(addic, ADDIC, 0x30000000) \
+ /* Add Immediate Carrying & record CR0 */ \
+ V(addicx, ADDICx, 0x34000000) \
+ /* Add Immediate Shifted */ \
+ V(addis, ADDIS, 0x3C000000) \
+ /* AND Immediate & record CR0 */ \
+ V(andix, ANDIx, 0x70000000) \
+ /* AND Immediate Shifted & record CR0 */ \
+ V(andisx, ANDISx, 0x74000000) \
+ /* Compare Immediate */ \
+ V(cmpi, CMPI, 0x2C000000) \
+ /* Compare Logical Immediate */ \
+ V(cmpli, CMPLI, 0x28000000) \
+ /* Load Byte and Zero */ \
+ V(lbz, LBZ, 0x88000000) \
+ /* Load Byte and Zero with Update */ \
+ V(lbzu, LBZU, 0x8C000000) \
+ /* Load Halfword Algebraic */ \
+ V(lha, LHA, 0xA8000000) \
+ /* Load Halfword Algebraic with Update */ \
+ V(lhau, LHAU, 0xAC000000) \
+ /* Load Halfword and Zero */ \
+ V(lhz, LHZ, 0xA0000000) \
+ /* Load Halfword and Zero with Update */ \
+ V(lhzu, LHZU, 0xA4000000) \
+ /* Load Multiple Word */ \
+ V(lmw, LMW, 0xB8000000) \
+ /* Load Word and Zero */ \
+ V(lwz, LWZ, 0x80000000) \
+ /* Load Word and Zero with Update */ \
+ V(lwzu, LWZU, 0x84000000) \
+ /* Multiply Low Immediate */ \
+ V(mulli, MULLI, 0x1C000000) \
+ /* OR Immediate */ \
+ V(ori, ORI, 0x60000000) \
+ /* OR Immediate Shifted */ \
+ V(oris, ORIS, 0x64000000) \
+ /* Store Byte */ \
+ V(stb, STB, 0x98000000) \
+ /* Store Byte with Update */ \
+ V(stbu, STBU, 0x9C000000) \
+ /* Store Halfword */ \
+ V(sth, STH, 0xB0000000) \
+ /* Store Halfword with Update */ \
+ V(sthu, STHU, 0xB4000000) \
+ /* Store Multiple Word */ \
+ V(stmw, STMW, 0xBC000000) \
+ /* Store Word */ \
+ V(stw, STW, 0x90000000) \
+ /* Store Word with Update */ \
+ V(stwu, STWU, 0x94000000) \
+ /* Subtract From Immediate Carrying */ \
+ V(subfic, SUBFIC, 0x20000000) \
+ /* Trap Word Immediate */ \
+ V(twi, TWI, 0x0C000000) \
+ /* XOR Immediate */ \
+ V(xori, XORI, 0x68000000) \
+ /* XOR Immediate Shifted */ \
+ V(xoris, XORIS, 0x6C000000) \
+ /* Load Floating-Point Double */ \
+ V(lfd, LFD, 0xC8000000) \
+ /* Load Floating-Point Double with Update */ \
+ V(lfdu, LFDU, 0xCC000000) \
+ /* Load Floating-Point Single */ \
+ V(lfs, LFS, 0xC0000000) \
+ /* Load Floating-Point Single with Update */ \
+ V(lfsu, LFSU, 0xC4000000) \
+ /* Store Floating-Point Double */ \
+ V(stfd, STFD, 0xD8000000) \
+ /* Store Floating-Point Double with Update */ \
+ V(stfdu, STFDU, 0xDC000000) \
+ /* Store Floating-Point Single */ \
+ V(stfs, STFS, 0xD0000000) \
+ /* Store Floating-Point Single with Update */ \
+ V(stfsu, STFSU, 0xD4000000)
+
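D-form instructions put RT in the field below the primary opcode, RA next to it, and a 16-bit signed immediate in the low half of the word. For example, composing addi from the ADDI base value above (the DForm helper is illustrative):

#include <cstdint>
#include <cstdio>

// D-form: base | RT<<21 | RA<<16 | 16-bit signed immediate.
constexpr uint32_t DForm(uint32_t base, uint32_t rt, uint32_t ra, int16_t si) {
  return base | (rt << 21) | (ra << 16) | static_cast<uint16_t>(si);
}

int main() {
  // addi r3, r1, -8 using the ADDI base value above.
  std::printf("0x%08X\n", DForm(0x38000000u, 3, 1, -8));  // 0x3861FFF8
}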
+#define PPC_XFL_OPCODE_LIST(V) \
+ /* Move To FPSCR Fields */ \
+ V(mtfsf, MTFSF, 0xFC00058E)
+
+#define PPC_XFX_OPCODE_LIST(V) \
+ /* Move From Condition Register */ \
+ V(mfcr, MFCR, 0x7C000026) \
+ /* Move From One Condition Register Field */ \
+ V(mfocrf, MFOCRF, 0x7C100026) \
+ /* Move From Special Purpose Register */ \
+ V(mfspr, MFSPR, 0x7C0002A6) \
+ /* Move To Condition Register Fields */ \
+ V(mtcrf, MTCRF, 0x7C000120) \
+ /* Move To One Condition Register Field */ \
+ V(mtocrf, MTOCRF, 0x7C100120) \
+ /* Move To Special Purpose Register */ \
+ V(mtspr, MTSPR, 0x7C0003A6) \
+ /* Debugger Notify Halt */ \
+ V(dnh, DNH, 0x4C00018C) \
+ /* Move From Device Control Register */ \
+ V(mfdcr, MFDCR, 0x7C000286) \
+ /* Move To Device Control Register */ \
+ V(mtdcr, MTDCR, 0x7C000386) \
+ /* Move from Performance Monitor Register */ \
+ V(mfpmr, MFPMR, 0x7C00029C) \
+ /* Move To Performance Monitor Register */ \
+ V(mtpmr, MTPMR, 0x7C00039C) \
+ /* Move From Branch History Rolling Buffer */ \
+ V(mfbhrbe, MFBHRBE, 0x7C00025C) \
+ /* Move From Time Base */ \
+ V(mftb, MFTB, 0x7C0002E6)
+
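A quirk worth noting for mfspr/mtspr: the 10-bit SPR number occupies bits 11-20 with its two 5-bit halves swapped. Encoding mflr (LR is SPR 8) from the MFSPR base above shows the effect (Mfspr is our helper name):

#include <cstdint>
#include <cstdio>

// XFX-form mfspr: the SPR number's low half goes in bits 16-20 and its
// high half in bits 11-15, i.e. the two 5-bit halves are swapped.
constexpr uint32_t Mfspr(uint32_t rt, uint32_t spr) {
  return 0x7C0002A6u | (rt << 21) | ((spr & 0x1F) << 16) | ((spr >> 5) << 11);
}

int main() {
  std::printf("0x%08X\n", Mfspr(0, 8));  // mflr r0  -> 0x7C0802A6
  std::printf("0x%08X\n", Mfspr(0, 9));  // mfctr r0 -> 0x7C0902A6
}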
+#define PPC_MDS_OPCODE_LIST(V) \
+ /* Rotate Left Doubleword then Clear Left */ \
+ V(rldcl, RLDCL, 0x78000010) \
+ /* Rotate Left Doubleword then Clear Right */ \
+ V(rldcr, RLDCR, 0x78000012)
+
+#define PPC_A_OPCODE_LIST(V) \
+ /* Integer Select */ \
+ V(isel, ISEL, 0x7C00001E) \
+ /* Floating Add */ \
+ V(fadd, FADD, 0xFC00002A) \
+ /* Floating Add Single */ \
+ V(fadds, FADDS, 0xEC00002A) \
+ /* Floating Divide */ \
+ V(fdiv, FDIV, 0xFC000024) \
+ /* Floating Divide Single */ \
+ V(fdivs, FDIVS, 0xEC000024) \
+ /* Floating Multiply-Add */ \
+ V(fmadd, FMADD, 0xFC00003A) \
+ /* Floating Multiply-Add Single */ \
+ V(fmadds, FMADDS, 0xEC00003A) \
+ /* Floating Multiply-Subtract */ \
+ V(fmsub, FMSUB, 0xFC000038) \
+ /* Floating Multiply-Subtract Single */ \
+ V(fmsubs, FMSUBS, 0xEC000038) \
+ /* Floating Multiply */ \
+ V(fmul, FMUL, 0xFC000032) \
+ /* Floating Multiply Single */ \
+ V(fmuls, FMULS, 0xEC000032) \
+ /* Floating Negative Multiply-Add */ \
+ V(fnmadd, FNMADD, 0xFC00003E) \
+ /* Floating Negative Multiply-Add Single */ \
+ V(fnmadds, FNMADDS, 0xEC00003E) \
+ /* Floating Negative Multiply-Subtract */ \
+ V(fnmsub, FNMSUB, 0xFC00003C) \
+ /* Floating Negative Multiply-Subtract Single */ \
+ V(fnmsubs, FNMSUBS, 0xEC00003C) \
+ /* Floating Reciprocal Estimate Single */ \
+ V(fres, FRES, 0xEC000030) \
+ /* Floating Reciprocal Square Root Estimate */ \
+ V(frsqrte, FRSQRTE, 0xFC000034) \
+ /* Floating Select */ \
+ V(fsel, FSEL, 0xFC00002E) \
+ /* Floating Square Root */ \
+ V(fsqrt, FSQRT, 0xFC00002C) \
+ /* Floating Square Root Single */ \
+ V(fsqrts, FSQRTS, 0xEC00002C) \
+ /* Floating Subtract */ \
+ V(fsub, FSUB, 0xFC000028) \
+ /* Floating Subtract Single */ \
+ V(fsubs, FSUBS, 0xEC000028) \
+ /* Floating Reciprocal Estimate */ \
+ V(fre, FRE, 0xFC000030) \
+ /* Floating Reciprocal Square Root Estimate Single */ \
+ V(frsqrtes, FRSQRTES, 0xEC000034)
+
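A-form instructions carry four 5-bit register fields plus a 5-bit extended opcode, and the assembly operand order is fmadd FRT, FRA, FRC, FRB even though FRB is encoded above FRC. A sketch using the FMADD value above (AForm is our name, not V8's):

#include <cstdint>
#include <cstdio>

// Assumed A-form layout: FRT<<21 | FRA<<16 | FRB<<11 | FRC<<6 over the base.
constexpr uint32_t AForm(uint32_t base, uint32_t frt, uint32_t fra,
                         uint32_t frb, uint32_t frc) {
  return base | (frt << 21) | (fra << 16) | (frb << 11) | (frc << 6);
}

int main() {
  // fmadd f1, f2, f3, f4 => f1 = f2*f3 + f4 (FMADD base above).
  std::printf("0x%08X\n", AForm(0xFC00003Au, 1, 2, 4, 3));  // 0xFC2220FA
}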
+#define PPC_VA_OPCODE_LIST(V) \
+ /* Vector Add Extended & write Carry Unsigned Quadword */ \
+ V(vaddecuq, VADDECUQ, 0x1000003D) \
+ /* Vector Add Extended Unsigned Quadword Modulo */ \
+ V(vaddeuqm, VADDEUQM, 0x1000003C) \
+ /* Vector Multiply-Add Single-Precision */ \
+ V(vmaddfp, VMADDFP, 0x1000002E) \
+ /* Vector Multiply-High-Add Signed Halfword Saturate */ \
+ V(vmhaddshs, VMHADDSHS, 0x10000020) \
+ /* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
+ V(vmhraddshs, VMHRADDSHS, 0x10000021) \
+ /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
+ V(vmladduhm, VMLADDUHM, 0x10000022) \
+ /* Vector Multiply-Sum Mixed Byte Modulo */ \
+ V(vmsummbm, VMSUMMBM, 0x10000025) \
+ /* Vector Multiply-Sum Signed Halfword Modulo */ \
+ V(vmsumshm, VMSUMSHM, 0x10000028) \
+ /* Vector Multiply-Sum Signed Halfword Saturate */ \
+ V(vmsumshs, VMSUMSHS, 0x10000029) \
+ /* Vector Multiply-Sum Unsigned Byte Modulo */ \
+ V(vmsumubm, VMSUMUBM, 0x10000024) \
+ /* Vector Multiply-Sum Unsigned Halfword Modulo */ \
+ V(vmsumuhm, VMSUMUHM, 0x10000026) \
+ /* Vector Multiply-Sum Unsigned Halfword Saturate */ \
+ V(vmsumuhs, VMSUMUHS, 0x10000027) \
+ /* Vector Negative Multiply-Subtract Single-Precision */ \
+ V(vnmsubfp, VNMSUBFP, 0x1000002F) \
+ /* Vector Permute */ \
+ V(vperm, VPERM, 0x1000002B) \
+ /* Vector Select */ \
+ V(vsel, VSEL, 0x1000002A) \
+ /* Vector Shift Left Double by Octet Immediate */ \
+ V(vsldoi, VSLDOI, 0x1000002C) \
+ /* Vector Subtract Extended & write Carry Unsigned Quadword */ \
+ V(vsubecuq, VSUBECUQ, 0x1000003F) \
+ /* Vector Subtract Extended Unsigned Quadword Modulo */ \
+ V(vsubeuqm, VSUBEUQM, 0x1000003E) \
+ /* Vector Permute and Exclusive-OR */ \
+ V(vpermxor, VPERMXOR, 0x1000002D)
+
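VA-form values use a 6-bit extended opcode in the low bits, leaving room for four vector register fields. Assembling vperm from the VPERM value above (the helper name is ours):

#include <cstdint>
#include <cstdio>

// VA-form: four 5-bit vector register fields over a 6-bit extended opcode.
constexpr uint32_t VAForm(uint32_t base, uint32_t vt, uint32_t va,
                          uint32_t vb, uint32_t vc) {
  return base | (vt << 21) | (va << 16) | (vb << 11) | (vc << 6);
}

int main() {
  // vperm v0, v1, v2, v3 using the VPERM base value above.
  std::printf("0x%08X\n", VAForm(0x1000002Bu, 0, 1, 2, 3));  // 0x100110EB
}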
+#define PPC_XX1_OPCODE_LIST(V) \
+ /* Load VSR Scalar Doubleword Indexed */ \
+ V(lxsdx, LXSDX, 0x7C000498) \
+ /* Load VSX Scalar as Integer Word Algebraic Indexed */ \
+ V(lxsiwax, LXSIWAX, 0x7C000098) \
+ /* Load VSX Scalar as Integer Word and Zero Indexed */ \
+ V(lxsiwzx, LXSIWZX, 0x7C000018) \
+ /* Load VSX Scalar Single-Precision Indexed */ \
+ V(lxsspx, LXSSPX, 0x7C000418) \
+ /* Load VSR Vector Doubleword*2 Indexed */ \
+ V(lxvd, LXVD, 0x7C000698) \
+ /* Load VSR Vector Doubleword & Splat Indexed */ \
+ V(lxvdsx, LXVDSX, 0x7C000298) \
+ /* Load VSR Vector Word*4 Indexed */ \
+ V(lxvw, LXVW, 0x7C000618) \
+ /* Move From VSR Doubleword */ \
+ V(mfvsrd, MFVSRD, 0x7C000066) \
+ /* Move From VSR Word and Zero */ \
+ V(mfvsrwz, MFVSRWZ, 0x7C0000E6) \
+ /* Store VSR Scalar Doubleword Indexed */ \
+ V(stxsdx, STXSDX, 0x7C000598) \
+ /* Store VSX Scalar as Integer Word Indexed */ \
+ V(stxsiwx, STXSIWX, 0x7C000118) \
+ /* Store VSX Scalar Single-Precision Indexed */ \
+ V(stxsspx, STXSSPX, 0x7C000518) \
+ /* Store VSR Vector Doubleword*2 Indexed */ \
+ V(stxvd, STXVD, 0x7C000798) \
+ /* Store VSR Vector Word*4 Indexed */ \
+ V(stxvw, STXVW, 0x7C000718)
+
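XX1-form VSX instructions address 64 VSRs with only a 5-bit T field: the sixth register bit travels as a TX bit in the instruction's lowest bit, so VSRs 32-63 set bit 0. A sketch assuming that layout (XX1Form is our name):

#include <cstdint>
#include <cstdio>

// Assumed XX1 layout: low five VSR bits in the T field, sixth bit as TX
// in the instruction's lowest bit, RA and RB as usual.
constexpr uint32_t XX1Form(uint32_t base, uint32_t xt, uint32_t ra,
                           uint32_t rb) {
  return base | ((xt & 0x1F) << 21) | (ra << 16) | (rb << 11) | (xt >> 5);
}

int main() {
  // lxsdx vs34, r0, r4 using the LXSDX base value above.
  std::printf("0x%08X\n", XX1Form(0x7C000498u, 34, 0, 4));  // 0x7C402499
}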
+#define PPC_B_OPCODE_LIST(V) \
+ /* Branch Conditional */ \
+ V(bc, BCX, 0x40000000)
+
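The lone B-form entry covers every conditional branch: BO selects the branch semantics, BI the CR bit tested, and the word-aligned 14-bit displacement shares its low two bits with the AA and LK flags. For instance, bne (BO=4 "branch if condition false", BI=2 = CR0's EQ bit), with BForm as our helper name:

#include <cstdint>
#include <cstdio>

// B-form: BO<<21 | BI<<16 | word-aligned 14-bit displacement | AA | LK.
constexpr uint32_t BForm(uint32_t bo, uint32_t bi, int16_t bd, uint32_t aa = 0,
                         uint32_t lk = 0) {
  return 0x40000000u | (bo << 21) | (bi << 16) |
         (static_cast<uint16_t>(bd) & 0xFFFC) | (aa << 1) | lk;
}

int main() {
  // bne cr0, .+8 (BO=4 "branch if false", BI=2 = CR0[EQ]).
  std::printf("0x%08X\n", BForm(4, 2, 8));  // 0x40820008
}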
+#define PPC_XO_OPCODE_LIST(V) \
+ /* Divide Doubleword */ \
+ V(divd, DIVD, 0x7C0003D2) \
+ /* Divide Doubleword Extended */ \
+ V(divde, DIVDE, 0x7C000352) \
+ /* Divide Doubleword Extended & record OV */ \
+ V(divdeo, DIVDEO, 0x7C000752) \
+ /* Divide Doubleword Extended Unsigned */ \
+ V(divdeu, DIVDEU, 0x7C000312) \
+ /* Divide Doubleword Extended Unsigned & record OV */ \
+ V(divdeuo, DIVDEUO, 0x7C000712) \
+ /* Divide Doubleword & record OV */ \
+ V(divdo, DIVDO, 0x7C0007D2) \
+ /* Divide Doubleword Unsigned */ \
+ V(divdu, DIVDU, 0x7C000392) \
+ /* Divide Doubleword Unsigned & record OV */ \
+ V(divduo, DIVDUO, 0x7C000792) \
+ /* Multiply High Doubleword */ \
+ V(mulhd, MULHD, 0x7C000092) \
+ /* Multiply High Doubleword Unsigned */ \
+ V(mulhdu, MULHDU, 0x7C000012) \
+ /* Multiply Low Doubleword */ \
+ V(mulld, MULLD, 0x7C0001D2) \
+ /* Multiply Low Doubleword & record OV */ \
+ V(mulldo, MULLDO, 0x7C0005D2) \
+ /* Add */ \
+ V(add, ADDX, 0x7C000214) \
+ /* Add Carrying */ \
+ V(addc, ADDCX, 0x7C000014) \
+ /* Add Carrying & record OV */ \
+ V(addco, ADDCO, 0x7C000414) \
+ /* Add Extended */ \
+ V(adde, ADDEX, 0x7C000114) \
+ /* Add Extended & record OV */ \
+ V(addeo, ADDEO, 0x7C000514) \
+ /* Add to Minus One Extended */ \
+ V(addme, ADDME, 0x7C0001D4) \
+ /* Add to Minus One Extended & record OV */ \
+ V(addmeo, ADDMEO, 0x7C0005D4) \
+ /* Add & record OV */ \
+ V(addo, ADDO, 0x7C000614) \
+ /* Add to Zero Extended */ \
+ V(addze, ADDZEX, 0x7C000194) \
+ /* Add to Zero Extended & record OV */ \
+ V(addzeo, ADDZEO, 0x7C000594) \
+ /* Divide Word */ \
+ V(divw, DIVW, 0x7C0003D6) \
+ /* Divide Word Extended */ \
+ V(divwe, DIVWE, 0x7C000356) \
+ /* Divide Word Extended & record OV */ \
+ V(divweo, DIVWEO, 0x7C000756) \
+ /* Divide Word Extended Unsigned */ \
+ V(divweu, DIVWEU, 0x7C000316) \
+ /* Divide Word Extended Unsigned & record OV */ \
+ V(divweuo, DIVWEUO, 0x7C000716) \
+ /* Divide Word & record OV */ \
+ V(divwo, DIVWO, 0x7C0007D6) \
+ /* Divide Word Unsigned */ \
+ V(divwu, DIVWU, 0x7C000396) \
+ /* Divide Word Unsigned & record OV */ \
+ V(divwuo, DIVWUO, 0x7C000796) \
+ /* Multiply High Word */ \
+ V(mulhw, MULHWX, 0x7C000096) \
+ /* Multiply High Word Unsigned */ \
+ V(mulhwu, MULHWUX, 0x7C000016) \
+ /* Multiply Low Word */ \
+ V(mullw, MULLW, 0x7C0001D6) \
+ /* Multiply Low Word & record OV */ \
+ V(mullwo, MULLWO, 0x7C0005D6) \
+ /* Negate */ \
+ V(neg, NEGX, 0x7C0000D0) \
+ /* Negate & record OV */ \
+ V(nego, NEGO, 0x7C0004D0) \
+ /* Subtract From */ \
+ V(subf, SUBFX, 0x7C000050) \
+ /* Subtract From Carrying */ \
+ V(subfc, SUBFCX, 0x7C000010) \
+ /* Subtract From Carrying & record OV */ \
+ V(subfco, SUBFCO, 0x7C000410) \
+ /* Subtract From Extended */ \
+ V(subfe, SUBFEX, 0x7C000110) \
+ /* Subtract From Extended & record OV */ \
+ V(subfeo, SUBFEO, 0x7C000510) \
+ /* Subtract From Minus One Extended */ \
+ V(subfme, SUBFME, 0x7C0001D0) \
+ /* Subtract From Minus One Extended & record OV */ \
+ V(subfmeo, SUBFMEO, 0x7C0005D0) \
+ /* Subtract From & record OV */ \
+ V(subfo, SUBFO, 0x7C000450) \
+ /* Subtract From Zero Extended */ \
+ V(subfze, SUBFZE, 0x7C000190) \
+ /* Subtract From Zero Extended & record OV */ \
+ V(subfzeo, SUBFZEO, 0x7C000590) \
+ /* Add and Generate Sixes */ \
+ V(addg, ADDG, 0x7C000094) \
+ /* Multiply Accumulate Cross Halfword to Word Modulo Signed */ \
+ V(macchw, MACCHW, 0x10000158) \
+ /* Multiply Accumulate Cross Halfword to Word Modulo Signed & record OV */ \
+ V(macchwo, MACCHWO, 0x10000158) \
+ /* Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
+ V(macchws, MACCHWS, 0x100001D8) \
+ /* Multiply Accumulate Cross Halfword to Word Saturate Signed & record */ \
+ /* OV */ \
+ V(macchwso, MACCHWSO, 0x100001D8) \
+ /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */ \
+ V(macchwsu, MACCHWSU, 0x10000198) \
+ /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned & record */ \
+ /* OV */ \
+ V(macchwsuo, MACCHWSUO, 0x10000198) \
+ /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */ \
+ V(macchwu, MACCHWU, 0x10000118) \
+ /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned & record */ \
+ /* OV */ \
+ V(macchwuo, MACCHWUO, 0x10000118) \
+ /* Multiply Accumulate High Halfword to Word Modulo Signed */ \
+ V(machhw, MACHHW, 0x10000058) \
+ /* Multiply Accumulate High Halfword to Word Modulo Signed & record OV */ \
+ V(machhwo, MACHHWO, 0x10000058) \
+ /* Multiply Accumulate High Halfword to Word Saturate Signed */ \
+ V(machhws, MACHHWS, 0x100000D8) \
+ /* Multiply Accumulate High Halfword to Word Saturate Signed & record OV */ \
+ V(machhwso, MACHHWSO, 0x100000D8) \
+ /* Multiply Accumulate High Halfword to Word Saturate Unsigned */ \
+ V(machhwsu, MACHHWSU, 0x10000098) \
+ /* Multiply Accumulate High Halfword to Word Saturate Unsigned & record */ \
+ /* OV */ \
+ V(machhwsuo, MACHHWSUO, 0x10000098) \
+ /* Multiply Accumulate High Halfword to Word Modulo Unsigned */ \
+ V(machhwu, MACHHWU, 0x10000018) \
+ /* Multiply Accumulate High Halfword to Word Modulo Unsigned & record OV */ \
+ V(machhwuo, MACHHWUO, 0x10000018) \
+ /* Multiply Accumulate Low Halfword to Word Modulo Signed */ \
+ V(maclhw, MACLHW, 0x10000358) \
+ /* Multiply Accumulate Low Halfword to Word Modulo Signed & record OV */ \
+ V(maclhwo, MACLHWO, 0x10000358) \
+ /* Multiply Accumulate Low Halfword to Word Saturate Signed */ \
+ V(maclhws, MACLHWS, 0x100003D8) \
+ /* Multiply Accumulate Low Halfword to Word Saturate Signed & record OV */ \
+ V(maclhwso, MACLHWSO, 0x100003D8) \
+ /* Multiply Accumulate Low Halfword to Word Saturate Unsigned */ \
+ V(maclhwsu, MACLHWSU, 0x10000398) \
+ /* Multiply Accumulate Low Halfword to Word Saturate Unsigned & record */ \
+ /* OV */ \
+ V(maclhwsuo, MACLHWSUO, 0x10000398) \
+ /* Multiply Accumulate Low Halfword to Word Modulo Unsigned */ \
+ V(maclhwu, MACLHWU, 0x10000318) \
+ /* Multiply Accumulate Low Halfword to Word Modulo Unsigned & record OV */ \
+ V(maclhwuo, MACLHWUO, 0x10000318) \
+ /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */ \
+ V(nmacchw, NMACCHW, 0x1000015C) \
+ /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed & */ \
+ /* record OV */ \
+ V(nmacchwo, NMACCHWO, 0x1000015C) \
+ /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
+ V(nmacchws, NMACCHWS, 0x100001DC) \
+ /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed & */ \
+ /* record OV */ \
+ V(nmacchwso, NMACCHWSO, 0x100001DC) \
+ /* Negative Multiply Accumulate High Halfword to Word Modulo Signed */ \
+ V(nmachhw, NMACHHW, 0x1000005C) \
+ /* Negative Multiply Accumulate High Halfword to Word Modulo Signed & */ \
+ /* record OV */ \
+ V(nmachhwo, NMACHHWO, 0x1000005C) \
+ /* Negative Multiply Accumulate High Halfword to Word Saturate Signed */ \
+ V(nmachhws, NMACHHWS, 0x100000DC) \
+ /* Negative Multiply Accumulate High Halfword to Word Saturate Signed & */ \
+ /* record OV */ \
+ V(nmachhwso, NMACHHWSO, 0x100000DC) \
+ /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */ \
+ V(nmaclhw, NMACLHW, 0x1000035C) \
+ /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed & */ \
+ /* record OV */ \
+ V(nmaclhwo, NMACLHWO, 0x1000035C) \
+ /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */ \
+ V(nmaclhws, NMACLHWS, 0x100003DC) \
+ /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed & */ \
+ /* record OV */ \
+ V(nmaclhwso, NMACLHWSO, 0x100003DC)
+
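A quick consistency check on this XO-form list: the overflow-recording ("o") variants of the plain arithmetic opcodes differ from their base encodings only in the OE bit, 0x400. For example:

    // OE bit = 0x400 in XO form:
    //   ADDO  (0x7C000614) == ADDX  (0x7C000214) | 0x400
    //   SUBFO (0x7C000450) == SUBFX (0x7C000050) | 0x400
    //   DIVWO (0x7C0007D6) == DIVW  (0x7C0003D6) | 0x400

Note that the multiply-accumulate pairs at the end of the list share one encoding per pair as written.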
+#define PPC_XL_OPCODE_LIST(V) \
+ /* Branch Conditional to Count Register */ \
+ V(bcctr, BCCTRX, 0x4C000420) \
+ /* Branch Conditional to Link Register */ \
+ V(bclr, BCLRX, 0x4C000020) \
+ /* Condition Register AND */ \
+ V(crand, CRAND, 0x4C000202) \
+ /* Condition Register AND with Complement */ \
+ V(crandc, CRANDC, 0x4C000102) \
+ /* Condition Register Equivalent */ \
+ V(creqv, CREQV, 0x4C000242) \
+ /* Condition Register NAND */ \
+ V(crnand, CRNAND, 0x4C0001C2) \
+ /* Condition Register NOR */ \
+ V(crnor, CRNOR, 0x4C000042) \
+ /* Condition Register OR */ \
+ V(cror, CROR, 0x4C000382) \
+ /* Condition Register OR with Complement */ \
+ V(crorc, CRORC, 0x4C000342) \
+ /* Condition Register XOR */ \
+ V(crxor, CRXOR, 0x4C000182) \
+ /* Instruction Synchronize */ \
+ V(isync, ISYNC, 0x4C00012C) \
+ /* Move Condition Register Field */ \
+ V(mcrf, MCRF, 0x4C000000) \
+ /* Return From Critical Interrupt */ \
+ V(rfci, RFCI, 0x4C000066) \
+ /* Return From Interrupt */ \
+ V(rfi, RFI, 0x4C000064) \
+ /* Return From Machine Check Interrupt */ \
+ V(rfmci, RFMCI, 0x4C00004C) \
+ /* Embedded Hypervisor Privilege */ \
+ V(ehpriv, EHPRIV, 0x7C00021C) \
+ /* Return From Guest Interrupt */ \
+ V(rfgi, RFGI, 0x4C0000CC) \
+ /* Doze */ \
+ V(doze, DOZE, 0x4C000324) \
+ /* Return From Interrupt Doubleword Hypervisor */ \
+ V(hrfid, HRFID, 0x4C000224) \
+ /* Nap */ \
+ V(nap, NAP, 0x4C000364) \
+ /* Return from Event Based Branch */ \
+ V(rfebb, RFEBB, 0x4C000124) \
+ /* Return from Interrupt Doubleword */ \
+ V(rfid, RFID, 0x4C000024) \
+ /* Rip Van Winkle */ \
+ V(rvwinkle, RVWINKLE, 0x4C0003E4) \
+ /* Sleep */ \
+ V(sleep, SLEEP, 0x4C0003A4)
+
+#define PPC_XX4_OPCODE_LIST(V) \
+ /* VSX Select */ \
+ V(xxsel, XXSEL, 0xF0000030)
+
+#define PPC_I_OPCODE_LIST(V) \
+ /* Branch */ \
+ V(b, BX, 0x48000000)
+
+#define PPC_M_OPCODE_LIST(V) \
+ /* Rotate Left Word Immediate then Mask Insert */ \
+ V(rlwimi, RLWIMIX, 0x50000000) \
+ /* Rotate Left Word Immediate then AND with Mask */ \
+ V(rlwinm, RLWINMX, 0x54000000) \
+ /* Rotate Left Word then AND with Mask */ \
+ V(rlwnm, RLWNMX, 0x5C000000)
+
+#define PPC_VX_OPCODE_LIST(V) \
+ /* Decimal Add Modulo */ \
+ V(bcdadd, BCDADD, 0xF0000400) \
+ /* Decimal Subtract Modulo */ \
+ V(bcdsub, BCDSUB, 0xF0000440) \
+ /* Move From Vector Status and Control Register */ \
+ V(mfvscr, MFVSCR, 0x10000604) \
+ /* Move To Vector Status and Control Register */ \
+ V(mtvscr, MTVSCR, 0x10000644) \
+ /* Vector Add & write Carry Unsigned Quadword */ \
+ V(vaddcuq, VADDCUQ, 0x10000140) \
+ /* Vector Add and Write Carry-Out Unsigned Word */ \
+ V(vaddcuw, VADDCUW, 0x10000180) \
+ /* Vector Add Single-Precision */ \
+ V(vaddfp, VADDFP, 0x1000000A) \
+ /* Vector Add Signed Byte Saturate */ \
+ V(vaddsbs, VADDSBS, 0x10000300) \
+ /* Vector Add Signed Halfword Saturate */ \
+ V(vaddshs, VADDSHS, 0x10000340) \
+ /* Vector Add Signed Word Saturate */ \
+ V(vaddsws, VADDSWS, 0x10000380) \
+ /* Vector Add Unsigned Byte Modulo */ \
+ V(vaddubm, VADDUBM, 0x10000000) \
+ /* Vector Add Unsigned Byte Saturate */ \
+ V(vaddubs, VADDUBS, 0x10000200) \
+ /* Vector Add Unsigned Doubleword Modulo */ \
+ V(vaddudm, VADDUDM, 0x100000C0) \
+ /* Vector Add Unsigned Halfword Modulo */ \
+ V(vadduhm, VADDUHM, 0x10000040) \
+ /* Vector Add Unsigned Halfword Saturate */ \
+ V(vadduhs, VADDUHS, 0x10000240) \
+ /* Vector Add Unsigned Quadword Modulo */ \
+ V(vadduqm, VADDUQM, 0x10000100) \
+ /* Vector Add Unsigned Word Modulo */ \
+ V(vadduwm, VADDUWM, 0x10000080) \
+ /* Vector Add Unsigned Word Saturate */ \
+ V(vadduws, VADDUWS, 0x10000280) \
+ /* Vector Logical AND */ \
+ V(vand, VAND, 0x10000404) \
+ /* Vector Logical AND with Complement */ \
+ V(vandc, VANDC, 0x10000444) \
+ /* Vector Average Signed Byte */ \
+ V(vavgsb, VAVGSB, 0x10000502) \
+ /* Vector Average Signed Halfword */ \
+ V(vavgsh, VAVGSH, 0x10000542) \
+ /* Vector Average Signed Word */ \
+ V(vavgsw, VAVGSW, 0x10000582) \
+ /* Vector Average Unsigned Byte */ \
+ V(vavgub, VAVGUB, 0x10000402) \
+ /* Vector Average Unsigned Halfword */ \
+ V(vavguh, VAVGUH, 0x10000442) \
+ /* Vector Average Unsigned Word */ \
+ V(vavguw, VAVGUW, 0x10000482) \
+ /* Vector Bit Permute Quadword */ \
+ V(vbpermq, VBPERMQ, 0x1000054C) \
+ /* Vector Convert From Signed Fixed-Point Word To Single-Precision */ \
+ V(vcfsx, VCFSX, 0x1000034A) \
+ /* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */ \
+ V(vcfux, VCFUX, 0x1000030A) \
+ /* Vector Count Leading Zeros Byte */ \
+ V(vclzb, VCLZB, 0x10000702) \
+ /* Vector Count Leading Zeros Doubleword */ \
+ V(vclzd, VCLZD, 0x100007C2) \
+ /* Vector Count Leading Zeros Halfword */ \
+ V(vclzh, VCLZH, 0x10000742) \
+ /* Vector Count Leading Zeros Word */ \
+ V(vclzw, VCLZW, 0x10000782) \
+ /* Vector Convert From Single-Precision To Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(vctsxs, VCTSXS, 0x100003CA) \
+ /* Vector Convert From Single-Precision To Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(vctuxs, VCTUXS, 0x1000038A) \
+ /* Vector Equivalence */ \
+ V(veqv, VEQV, 0x10000684) \
+ /* Vector 2 Raised to the Exponent Estimate Single-Precision */ \
+ V(vexptefp, VEXPTEFP, 0x1000018A) \
+ /* Vector Gather Bits by Byte by Doubleword */ \
+ V(vgbbd, VGBBD, 0x1000050C) \
+ /* Vector Log Base 2 Estimate Single-Precision */ \
+ V(vlogefp, VLOGEFP, 0x100001CA) \
+ /* Vector Maximum Single-Precision */ \
+ V(vmaxfp, VMAXFP, 0x1000040A) \
+ /* Vector Maximum Signed Byte */ \
+ V(vmaxsb, VMAXSB, 0x10000102) \
+ /* Vector Maximum Signed Doubleword */ \
+ V(vmaxsd, VMAXSD, 0x100001C2) \
+ /* Vector Maximum Signed Halfword */ \
+ V(vmaxsh, VMAXSH, 0x10000142) \
+ /* Vector Maximum Signed Word */ \
+ V(vmaxsw, VMAXSW, 0x10000182) \
+ /* Vector Maximum Unsigned Byte */ \
+ V(vmaxub, VMAXUB, 0x10000002) \
+ /* Vector Maximum Unsigned Doubleword */ \
+ V(vmaxud, VMAXUD, 0x100000C2) \
+ /* Vector Maximum Unsigned Halfword */ \
+ V(vmaxuh, VMAXUH, 0x10000042) \
+ /* Vector Maximum Unsigned Word */ \
+ V(vmaxuw, VMAXUW, 0x10000082) \
+ /* Vector Minimum Single-Precision */ \
+ V(vminfp, VMINFP, 0x1000044A) \
+ /* Vector Minimum Signed Byte */ \
+ V(vminsb, VMINSB, 0x10000302) \
+ /* Vector Minimum Signed Halfword */ \
+ V(vminsh, VMINSH, 0x10000342) \
+ /* Vector Minimum Signed Word */ \
+ V(vminsw, VMINSW, 0x10000382) \
+ /* Vector Minimum Unsigned Byte */ \
+ V(vminub, VMINUB, 0x10000202) \
+ /* Vector Minimum Unsigned Doubleword */ \
+ V(vminud, VMINUD, 0x100002C2) \
+ /* Vector Minimum Unsigned Halfword */ \
+ V(vminuh, VMINUH, 0x10000242) \
+ /* Vector Minimum Unsigned Word */ \
+ V(vminuw, VMINUW, 0x10000282) \
+ /* Vector Merge High Byte */ \
+ V(vmrghb, VMRGHB, 0x1000000C) \
+ /* Vector Merge High Halfword */ \
+ V(vmrghh, VMRGHH, 0x1000004C) \
+ /* Vector Merge High Word */ \
+ V(vmrghw, VMRGHW, 0x1000008C) \
+ /* Vector Merge Low Byte */ \
+ V(vmrglb, VMRGLB, 0x1000010C) \
+ /* Vector Merge Low Halfword */ \
+ V(vmrglh, VMRGLH, 0x1000014C) \
+ /* Vector Merge Low Word */ \
+ V(vmrglw, VMRGLW, 0x1000018C) \
+ /* Vector Multiply Even Signed Byte */ \
+ V(vmulesb, VMULESB, 0x10000308) \
+ /* Vector Multiply Even Signed Halfword */ \
+ V(vmulesh, VMULESH, 0x10000348) \
+ /* Vector Multiply Even Signed Word */ \
+ V(vmulesw, VMULESW, 0x10000388) \
+ /* Vector Multiply Even Unsigned Byte */ \
+ V(vmuleub, VMULEUB, 0x10000208) \
+ /* Vector Multiply Even Unsigned Halfword */ \
+ V(vmuleuh, VMULEUH, 0x10000248) \
+ /* Vector Multiply Even Unsigned Word */ \
+ V(vmuleuw, VMULEUW, 0x10000288) \
+ /* Vector Multiply Odd Signed Byte */ \
+ V(vmulosb, VMULOSB, 0x10000108) \
+ /* Vector Multiply Odd Signed Halfword */ \
+ V(vmulosh, VMULOSH, 0x10000148) \
+ /* Vector Multiply Odd Signed Word */ \
+ V(vmulosw, VMULOSW, 0x10000188) \
+ /* Vector Multiply Odd Unsigned Byte */ \
+ V(vmuloub, VMULOUB, 0x10000008) \
+ /* Vector Multiply Odd Unsigned Halfword */ \
+ V(vmulouh, VMULOUH, 0x10000048) \
+ /* Vector Multiply Odd Unsigned Word */ \
+ V(vmulouw, VMULOUW, 0x10000088) \
+ /* Vector Multiply Unsigned Word Modulo */ \
+ V(vmuluwm, VMULUWM, 0x10000089) \
+ /* Vector NAND */ \
+ V(vnand, VNAND, 0x10000584) \
+ /* Vector Logical NOR */ \
+ V(vnor, VNOR, 0x10000504) \
+ /* Vector Logical OR */ \
+ V(vor, VOR, 0x10000484) \
+ /* Vector OR with Complement */ \
+ V(vorc, VORC, 0x10000544) \
+ /* Vector Pack Pixel */ \
+ V(vpkpx, VPKPX, 0x1000030E) \
+ /* Vector Pack Signed Doubleword Signed Saturate */ \
+ V(vpksdss, VPKSDSS, 0x100005CE) \
+ /* Vector Pack Signed Doubleword Unsigned Saturate */ \
+ V(vpksdus, VPKSDUS, 0x1000054E) \
+ /* Vector Pack Signed Halfword Signed Saturate */ \
+ V(vpkshss, VPKSHSS, 0x1000018E) \
+ /* Vector Pack Signed Halfword Unsigned Saturate */ \
+ V(vpkshus, VPKSHUS, 0x1000010E) \
+ /* Vector Pack Signed Word Signed Saturate */ \
+ V(vpkswss, VPKSWSS, 0x100001CE) \
+ /* Vector Pack Signed Word Unsigned Saturate */ \
+ V(vpkswus, VPKSWUS, 0x1000014E) \
+ /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
+ V(vpkudum, VPKUDUM, 0x1000044E) \
+ /* Vector Pack Unsigned Doubleword Unsigned Saturate */ \
+ V(vpkudus, VPKUDUS, 0x100004CE) \
+ /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
+ V(vpkuhum, VPKUHUM, 0x1000000E) \
+ /* Vector Pack Unsigned Halfword Unsigned Saturate */ \
+ V(vpkuhus, VPKUHUS, 0x1000008E) \
+ /* Vector Pack Unsigned Word Unsigned Modulo */ \
+ V(vpkuwum, VPKUWUM, 0x1000004E) \
+ /* Vector Pack Unsigned Word Unsigned Saturate */ \
+ V(vpkuwus, VPKUWUS, 0x100000CE) \
+ /* Vector Polynomial Multiply-Sum Byte */ \
+ V(vpmsumb, VPMSUMB, 0x10000408) \
+ /* Vector Polynomial Multiply-Sum Doubleword */ \
+ V(vpmsumd, VPMSUMD, 0x100004C8) \
+ /* Vector Polynomial Multiply-Sum Halfword */ \
+ V(vpmsumh, VPMSUMH, 0x10000448) \
+ /* Vector Polynomial Multiply-Sum Word */ \
+ V(vpmsumw, VPMSUMW, 0x10000488) \
+ /* Vector Population Count Byte */ \
+ V(vpopcntb, VPOPCNTB, 0x10000703) \
+ /* Vector Population Count Doubleword */ \
+ V(vpopcntd, VPOPCNTD, 0x100007C3) \
+ /* Vector Population Count Halfword */ \
+ V(vpopcnth, VPOPCNTH, 0x10000743) \
+ /* Vector Population Count Word */ \
+ V(vpopcntw, VPOPCNTW, 0x10000783) \
+ /* Vector Reciprocal Estimate Single-Precision */ \
+ V(vrefp, VREFP, 0x1000010A) \
+ /* Vector Round to Single-Precision Integer toward -Infinity */ \
+ V(vrfim, VRFIM, 0x100002CA) \
+ /* Vector Round to Single-Precision Integer Nearest */ \
+ V(vrfin, VRFIN, 0x1000020A) \
+ /* Vector Round to Single-Precision Integer toward +Infinity */ \
+ V(vrfip, VRFIP, 0x1000028A) \
+ /* Vector Round to Single-Precision Integer toward Zero */ \
+ V(vrfiz, VRFIZ, 0x1000024A) \
+ /* Vector Rotate Left Byte */ \
+ V(vrlb, VRLB, 0x10000004) \
+ /* Vector Rotate Left Doubleword */ \
+ V(vrld, VRLD, 0x100000C4) \
+ /* Vector Rotate Left Halfword */ \
+ V(vrlh, VRLH, 0x10000044) \
+ /* Vector Rotate Left Word */ \
+ V(vrlw, VRLW, 0x10000084) \
+ /* Vector Reciprocal Square Root Estimate Single-Precision */ \
+ V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \
+ /* Vector Shift Left */ \
+ V(vsl, VSL, 0x100001C4) \
+ /* Vector Shift Left Byte */ \
+ V(vslb, VSLB, 0x10000104) \
+ /* Vector Shift Left Doubleword */ \
+ V(vsld, VSLD, 0x100005C4) \
+ /* Vector Shift Left Halfword */ \
+ V(vslh, VSLH, 0x10000144) \
+ /* Vector Shift Left by Octet */ \
+ V(vslo, VSLO, 0x1000040C) \
+ /* Vector Shift Left Word */ \
+ V(vslw, VSLW, 0x10000184) \
+ /* Vector Splat Byte */ \
+ V(vspltb, VSPLTB, 0x1000020C) \
+ /* Vector Splat Halfword */ \
+ V(vsplth, VSPLTH, 0x1000024C) \
+ /* Vector Splat Immediate Signed Byte */ \
+ V(vspltisb, VSPLTISB, 0x1000030C) \
+ /* Vector Splat Immediate Signed Halfword */ \
+ V(vspltish, VSPLTISH, 0x1000034C) \
+ /* Vector Splat Immediate Signed Word */ \
+ V(vspltisw, VSPLTISW, 0x1000038C) \
+ /* Vector Splat Word */ \
+ V(vspltw, VSPLTW, 0x1000028C) \
+ /* Vector Shift Right */ \
+ V(vsr, VSR, 0x100002C4) \
+ /* Vector Shift Right Algebraic Byte */ \
+ V(vsrab, VSRAB, 0x10000304) \
+ /* Vector Shift Right Algebraic Doubleword */ \
+ V(vsrad, VSRAD, 0x100003C4) \
+ /* Vector Shift Right Algebraic Halfword */ \
+ V(vsrah, VSRAH, 0x10000344) \
+ /* Vector Shift Right Algebraic Word */ \
+ V(vsraw, VSRAW, 0x10000384) \
+ /* Vector Shift Right Byte */ \
+ V(vsrb, VSRB, 0x10000204) \
+ /* Vector Shift Right Doubleword */ \
+ V(vsrd, VSRD, 0x100006C4) \
+ /* Vector Shift Right Halfword */ \
+ V(vsrh, VSRH, 0x10000244) \
+ /* Vector Shift Right by Octet */ \
+ V(vsro, VSRO, 0x1000044C) \
+ /* Vector Shift Right Word */ \
+ V(vsrw, VSRW, 0x10000284) \
+ /* Vector Subtract & write Carry Unsigned Quadword */ \
+ V(vsubcuq, VSUBCUQ, 0x10000540) \
+ /* Vector Subtract and Write Carry-Out Unsigned Word */ \
+ V(vsubcuw, VSUBCUW, 0x10000580) \
+ /* Vector Subtract Single-Precision */ \
+ V(vsubfp, VSUBFP, 0x1000004A) \
+ /* Vector Subtract Signed Byte Saturate */ \
+ V(vsubsbs, VSUBSBS, 0x10000700) \
+ /* Vector Subtract Signed Halfword Saturate */ \
+ V(vsubshs, VSUBSHS, 0x10000740) \
+ /* Vector Subtract Signed Word Saturate */ \
+ V(vsubsws, VSUBSWS, 0x10000780) \
+ /* Vector Subtract Unsigned Byte Modulo */ \
+ V(vsububm, VSUBUBM, 0x10000400) \
+ /* Vector Subtract Unsigned Byte Saturate */ \
+ V(vsububs, VSUBUBS, 0x10000600) \
+ /* Vector Subtract Unsigned Doubleword Modulo */ \
+ V(vsubudm, VSUBUDM, 0x100004C0) \
+ /* Vector Subtract Unsigned Halfword Modulo */ \
+ V(vsubuhm, VSUBUHM, 0x10000440) \
+ /* Vector Subtract Unsigned Halfword Saturate */ \
+ V(vsubuhs, VSUBUHS, 0x10000640) \
+ /* Vector Subtract Unsigned Quadword Modulo */ \
+ V(vsubuqm, VSUBUQM, 0x10000500) \
+ /* Vector Subtract Unsigned Word Modulo */ \
+ V(vsubuwm, VSUBUWM, 0x10000480) \
+ /* Vector Subtract Unsigned Word Saturate */ \
+ V(vsubuws, VSUBUWS, 0x10000680) \
+ /* Vector Sum across Half Signed Word Saturate */ \
+ V(vsum2sws, VSUM2SWS, 0x10000688) \
+ /* Vector Sum across Quarter Signed Byte Saturate */ \
+ V(vsum4sbs, VSUM4SBS, 0x10000708) \
+ /* Vector Sum across Quarter Signed Halfword Saturate */ \
+ V(vsum4shs, VSUM4SHS, 0x10000648) \
+ /* Vector Sum across Quarter Unsigned Byte Saturate */ \
+  V(vsum4ubs, VSUM4UBS, 0x10000608)                                          \
+ /* Vector Sum across Signed Word Saturate */ \
+ V(vsumsws, VSUMSWS, 0x10000788) \
+ /* Vector Unpack High Pixel */ \
+ V(vupkhpx, VUPKHPX, 0x1000034E) \
+ /* Vector Unpack High Signed Byte */ \
+ V(vupkhsb, VUPKHSB, 0x1000020E) \
+ /* Vector Unpack High Signed Halfword */ \
+ V(vupkhsh, VUPKHSH, 0x1000024E) \
+ /* Vector Unpack High Signed Word */ \
+ V(vupkhsw, VUPKHSW, 0x1000064E) \
+ /* Vector Unpack Low Pixel */ \
+ V(vupklpx, VUPKLPX, 0x100003CE) \
+ /* Vector Unpack Low Signed Byte */ \
+ V(vupklsb, VUPKLSB, 0x1000028E) \
+ /* Vector Unpack Low Signed Halfword */ \
+ V(vupklsh, VUPKLSH, 0x100002CE) \
+ /* Vector Unpack Low Signed Word */ \
+ V(vupklsw, VUPKLSW, 0x100006CE) \
+ /* Vector Logical XOR */ \
+ V(vxor, VXOR, 0x100004C4) \
+ /* Vector AES Cipher */ \
+ V(vcipher, VCIPHER, 0x10000508) \
+ /* Vector AES Cipher Last */ \
+ V(vcipherlast, VCIPHERLAST, 0x10000509) \
+ /* Vector AES Inverse Cipher */ \
+ V(vncipher, VNCIPHER, 0x10000548) \
+ /* Vector AES Inverse Cipher Last */ \
+ V(vncipherlast, VNCIPHERLAST, 0x10000549) \
+ /* Vector AES S-Box */ \
+ V(vsbox, VSBOX, 0x100005C8) \
+ /* Vector SHA-512 Sigma Doubleword */ \
+ V(vshasigmad, VSHASIGMAD, 0x100006C2) \
+ /* Vector SHA-256 Sigma Word */ \
+ V(vshasigmaw, VSHASIGMAW, 0x10000682) \
+ /* Vector Merge Even Word */ \
+ V(vmrgew, VMRGEW, 0x1000078C) \
+ /* Vector Merge Odd Word */ \
+ V(vmrgow, VMRGOW, 0x1000068C)
+
+#define PPC_XS_OPCODE_LIST(V) \
+ /* Shift Right Algebraic Doubleword Immediate */ \
+ V(sradi, SRADIX, 0x7C000674)
+
+#define PPC_MD_OPCODE_LIST(V) \
+ /* Rotate Left Doubleword Immediate then Clear */ \
+ V(rldic, RLDIC, 0x78000008) \
+ /* Rotate Left Doubleword Immediate then Clear Left */ \
+ V(rldicl, RLDICL, 0x78000000) \
+ /* Rotate Left Doubleword Immediate then Clear Right */ \
+ V(rldicr, RLDICR, 0x78000004) \
+ /* Rotate Left Doubleword Immediate then Mask Insert */ \
+ V(rldimi, RLDIMI, 0x7800000C)
+
+#define PPC_SC_OPCODE_LIST(V) \
+ /* System Call */ \
+ V(sc, SC, 0x44000002)
+
+
+#define PPC_OPCODE_LIST(V) \
+ PPC_X_OPCODE_LIST(V) \
+ PPC_XO_OPCODE_LIST(V) \
+ PPC_DS_OPCODE_LIST(V) \
+ PPC_MDS_OPCODE_LIST(V) \
+ PPC_MD_OPCODE_LIST(V) \
+ PPC_XS_OPCODE_LIST(V) \
+ PPC_D_OPCODE_LIST(V) \
+ PPC_I_OPCODE_LIST(V) \
+ PPC_B_OPCODE_LIST(V) \
+ PPC_XL_OPCODE_LIST(V) \
+ PPC_A_OPCODE_LIST(V) \
+ PPC_XFX_OPCODE_LIST(V) \
+ PPC_M_OPCODE_LIST(V) \
+ PPC_SC_OPCODE_LIST(V) \
+ PPC_Z23_OPCODE_LIST(V) \
+ PPC_Z22_OPCODE_LIST(V) \
+ PPC_EVX_OPCODE_LIST(V) \
+ PPC_XFL_OPCODE_LIST(V) \
+ PPC_EVS_OPCODE_LIST(V) \
+ PPC_VX_OPCODE_LIST(V) \
+ PPC_VA_OPCODE_LIST(V) \
+ PPC_VC_OPCODE_LIST(V) \
+ PPC_XX1_OPCODE_LIST(V) \
+ PPC_XX2_OPCODE_LIST(V) \
+ PPC_XX3_OPCODE_LIST(V) \
+ PPC_XX4_OPCODE_LIST(V)
+
+
+enum Opcode : uint32_t {
+#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
opcode_name = opcode_value,
- XX3_OPCODE_LIST(DECLARE_OPCODES) XX2_OPCODE_LIST(DECLARE_OPCODES)
-#undef DECLARE_OPCODES
+ PPC_OPCODE_LIST(DECLARE_INSTRUCTION)
+#undef DECLARE_INSTRUCTION
+ EXT1 = 0x4C000000, // Extended code set 1
+ EXT2 = 0x7C000000, // Extended code set 2
+ EXT3 = 0xEC000000, // Extended code set 3
+ EXT4 = 0xFC000000, // Extended code set 4
+ EXT5 = 0x78000000, // Extended code set 5 - 64bit only
+ EXT6 = 0xF0000000, // Extended code set 6
};
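To make the table-driven definition concrete: DECLARE_INSTRUCTION turns each V(name, opcode_name, opcode_value) entry into one enumerator, so a single-entry list such as PPC_B_OPCODE_LIST expands to exactly one line of the enum. A minimal sketch, using only the macros defined above:

    #define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
      opcode_name = opcode_value,
    enum Demo : uint32_t {
      PPC_B_OPCODE_LIST(DECLARE_INSTRUCTION)  // expands to: BCX = 0x40000000,
    };
    #undef DECLARE_INSTRUCTION

PPC_OPCODE_LIST simply concatenates every per-format list through the same callback.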
// Instruction encoding bits and masks.
@@ -556,7 +2787,7 @@ class Instruction {
}
// Read a bit field out of the instruction bits.
- inline int BitField(int hi, int lo) const {
+ inline uint32_t BitField(int hi, int lo) const {
return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
}
@@ -572,7 +2803,7 @@ class Instruction {
// Read a bit field out of the instruction bits.
- static inline int BitField(Instr instr, int hi, int lo) {
+ static inline uint32_t BitField(Instr instr, int hi, int lo) {
return instr & (((2 << (hi - lo)) - 1) << lo);
}
@@ -587,7 +2818,7 @@ class Instruction {
inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); }
inline Opcode OpcodeField() const {
- return static_cast<Opcode>(BitField(24, 21));
+ return static_cast<Opcode>(BitField(31, 26));
}
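The int-to-uint32_t change is load-bearing: BitField deliberately masks without shifting so the extended-opcode prefix can be ORed in, and mask results with the top bit set (anything EXT6-based, 0xF0000000) would come out negative as int. A worked example of the arithmetic, using the constants defined above:

    // BitField(10, 1): mask = ((2 << (10 - 1)) - 1) << 1 = 0x7FE (bits 10..1).
    // For a bclr instruction word 0x4C000020:
    //   EXT1 | BitField(10, 1) = 0x4C000000 | 0x020 = 0x4C000020 == BCLRX,
    // so the result compares directly against the Opcode enum.
    // OpcodeField() now masks bits 31..26 in place (mask 0xFC000000): an
    // EXT2-class word such as 0x7C0003D6 yields 0x7C000000 == EXT2.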
// Fields used in Software interrupt instructions
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 39102a1ef0..b96dc6fece 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -101,7 +101,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
+ Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 5da45f27f0..6baf3d0c7f 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -371,13 +371,13 @@ void Decoder::UnknownFormat(Instruction* instr, const char* name) {
void Decoder::DecodeExt1(Instruction* instr) {
- switch (instr->Bits(10, 1) << 1) {
+ switch (EXT1 | (instr->BitField(10, 1))) {
case MCRF: {
UnknownFormat(instr, "mcrf"); // not used by V8
break;
}
case BCLRX: {
- int bo = instr->Bits(25, 21) << 21;
+ int bo = instr->BitField(25, 21);
int bi = instr->Bits(20, 16);
CRBit cond = static_cast<CRBit>(bi & (CRWIDTH - 1));
switch (bo) {
@@ -447,7 +447,7 @@ void Decoder::DecodeExt1(Instruction* instr) {
break;
}
case BCCTRX: {
- switch (instr->Bits(25, 21) << 21) {
+ switch (instr->BitField(25, 21)) {
case DCBNZF: {
UnknownFormat(instr, "bcctrx-dcbnzf");
break;
@@ -541,7 +541,7 @@ void Decoder::DecodeExt1(Instruction* instr) {
void Decoder::DecodeExt2(Instruction* instr) {
 // Some encodings use bits 10-1; handle those first
- switch (instr->Bits(10, 1) << 1) {
+ switch (EXT2 | (instr->BitField(10, 1))) {
case SRWX: {
Format(instr, "srw'. 'ra, 'rs, 'rb");
return;
@@ -642,7 +642,7 @@ void Decoder::DecodeExt2(Instruction* instr) {
#endif
}
- switch (instr->Bits(10, 2) << 2) {
+ switch (EXT2 | (instr->BitField(10, 2))) {
case SRADIX: {
Format(instr, "sradi'. 'ra,'rs,'sh");
return;
@@ -650,7 +650,7 @@ void Decoder::DecodeExt2(Instruction* instr) {
}
// ?? are all of these xo_form?
- switch (instr->Bits(9, 1) << 1) {
+ switch (EXT2 | (instr->BitField(9, 1))) {
case CMP: {
#if V8_TARGET_ARCH_PPC64
if (instr->Bit(21)) {
@@ -899,7 +899,7 @@ void Decoder::DecodeExt2(Instruction* instr) {
#endif
}
- switch (instr->Bits(5, 1) << 1) {
+ switch (EXT2 | (instr->BitField(5, 1))) {
case ISEL: {
Format(instr, "isel 'rt, 'ra, 'rb");
return;
@@ -912,7 +912,7 @@ void Decoder::DecodeExt2(Instruction* instr) {
void Decoder::DecodeExt3(Instruction* instr) {
- switch (instr->Bits(10, 1) << 1) {
+ switch (EXT3 | (instr->BitField(10, 1))) {
case FCFID: {
Format(instr, "fcfids'. 'Dt, 'Db");
break;
@@ -929,7 +929,7 @@ void Decoder::DecodeExt3(Instruction* instr) {
void Decoder::DecodeExt4(Instruction* instr) {
- switch (instr->Bits(5, 1) << 1) {
+ switch (EXT4 | (instr->BitField(5, 1))) {
case FDIV: {
Format(instr, "fdiv'. 'Dt, 'Da, 'Db");
return;
@@ -964,7 +964,7 @@ void Decoder::DecodeExt4(Instruction* instr) {
}
}
- switch (instr->Bits(10, 1) << 1) {
+ switch (EXT4 | (instr->BitField(10, 1))) {
case FCMPU: {
Format(instr, "fcmpu 'Da, 'Db");
break;
@@ -1065,7 +1065,7 @@ void Decoder::DecodeExt4(Instruction* instr) {
void Decoder::DecodeExt5(Instruction* instr) {
- switch (instr->Bits(4, 2) << 2) {
+ switch (EXT5 | (instr->BitField(4, 2))) {
case RLDICL: {
Format(instr, "rldicl'. 'ra, 'rs, 'sh, 'mb");
return;
@@ -1083,7 +1083,7 @@ void Decoder::DecodeExt5(Instruction* instr) {
return;
}
}
- switch (instr->Bits(4, 1) << 1) {
+ switch (EXT5 | (instr->BitField(4, 1))) {
case RLDCL: {
Format(instr, "rldcl'. 'ra, 'rs, 'sb, 'mb");
return;
@@ -1093,22 +1093,22 @@ void Decoder::DecodeExt5(Instruction* instr) {
}
void Decoder::DecodeExt6(Instruction* instr) {
- switch (instr->Bits(10, 3) << 3) {
+ switch (EXT6 | (instr->BitField(10, 3))) {
#define DECODE_XX3_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: { \
Format(instr, #name" 'Dt, 'Da, 'Db"); \
return; \
}
- XX3_OPCODE_LIST(DECODE_XX3_INSTRUCTIONS)
+ PPC_XX3_OPCODE_LIST(DECODE_XX3_INSTRUCTIONS)
#undef DECODE_XX3_INSTRUCTIONS
}
- switch (instr->Bits(10, 2) << 2) {
+ switch (EXT6 | (instr->BitField(10, 2))) {
#define DECODE_XX2_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: { \
Format(instr, #name" 'Dt, 'Db"); \
return; \
}
- XX2_OPCODE_LIST(DECODE_XX2_INSTRUCTIONS)
+ PPC_XX2_OPCODE_LIST(DECODE_XX2_INSTRUCTIONS)
}
#undef DECODE_XX3_INSTRUCTIONS
Unknown(instr); // not used by V8
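For reference, each list entry becomes a case label here; the xsadddp entry of PPC_XX3_OPCODE_LIST (also handled by the simulator's ExecuteExt6 below) expands to roughly:

    case XSADDDP: {
      Format(instr, "xsadddp 'Dt, 'Da, 'Db");
      return;
    }

so the switch value EXT6 | BitField(10, 3) must carry the full 0xF0000000 prefix for these labels to match.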
@@ -1130,7 +1130,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
return Instruction::kInstrSize;
}
- switch (instr->OpcodeValue() << 26) {
+ uint32_t opcode = instr->OpcodeValue() << 26;
+ switch (opcode) {
case TWI: {
PrintSoftwareInterrupt(instr->SvcValue());
break;
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 4ff59bbaf1..ed03094dbd 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -67,24 +67,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
@@ -137,15 +119,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r6};
+ Register registers[] = {r4, r3, r6};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3, r6, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -174,6 +154,13 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r5 : start index (to support rest parameters)
+ // r4 : the target to call
+ Register registers[] = {r4, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -208,13 +195,12 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Descriptor::InitializePlatformSpecific( \
- CallInterfaceDescriptorData* data) { \
- data->InitializePlatformSpecific(0, nullptr, nullptr); \
- }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+ Register registers[] = {r4, r6, r3, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -405,6 +391,15 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r4, // loaded new FP
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index a3e7641eb7..f2aa2e06f4 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -969,7 +969,7 @@ void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
int prologue_offset) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
- LoadSmiLiteral(r11, Smi::FromInt(type));
+ mov(r11, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(r11);
}
if (FLAG_enable_embedded_constant_pool) {
@@ -1022,8 +1022,8 @@ void MacroAssembler::Prologue(bool code_pre_aging, Register base,
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
- LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+ LoadP(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+ LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
@@ -1034,10 +1034,10 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
// This path cannot rely on ip containing code entry.
PushCommonFrame();
LoadConstantPoolPointerRegister();
- LoadSmiLiteral(ip, Smi::FromInt(type));
+ mov(ip, Operand(StackFrame::TypeToMarker(type)));
push(ip);
} else {
- LoadSmiLiteral(ip, Smi::FromInt(type));
+ mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
}
if (type == StackFrame::INTERNAL) {
@@ -1143,7 +1143,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// all of the pushes that have happened inside of V8
// since we were called from C code
- LoadSmiLiteral(ip, Smi::FromInt(frame_type));
+ mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1192,19 +1192,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-
-void MacroAssembler::InitializeNewString(Register string, Register length,
- Heap::RootListIndex map_index,
- Register scratch1, Register scratch2) {
- SmiTag(scratch1, length);
- LoadRoot(scratch2, map_index);
- StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
- li(scratch1, Operand(String::kEmptyHashField));
- StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
- StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
-}
-
-
int MacroAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
// Running on the real platform. Use the alignment as mandated by the local
@@ -1566,16 +1553,17 @@ void MacroAssembler::IsObjectNameType(Register object, Register scratch,
}
-void MacroAssembler::DebugBreak() {
- li(r3, Operand::Zero());
- mov(r4,
- Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
- CEntryStub ces(isolate(), 1);
- DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+ // Check whether we need to drop frames to restart a function on the stack.
+ ExternalReference restart_fp =
+ ExternalReference::debug_restart_fp_address(isolate());
+ mov(r4, Operand(restart_fp));
+ LoadWordArith(r4, MemOperand(r4));
+ cmpi(r4, Operand::Zero());
+ Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+ ne);
}
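The replacement turns the old unconditional DebugBreak runtime call into a cheap guarded tail call. In pseudocode (a sketch of the emitted sequence, not a real V8 API):

    // r4 = *debug_restart_fp_address;   // mov + LoadWordArith
    // if (r4 != 0)                      // cmpi r4, Operand::Zero()
    //   goto FrameDropperTrampoline;    // Jump ..., RelocInfo::CODE_TARGET, ne

A non-zero restart FP is set when the debugger requests that frames be dropped so a function on the stack can be restarted.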
-
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
@@ -2150,33 +2138,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss) {
- // Get the prototype or initial map from the function.
- LoadP(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- cmp(result, r0);
- beq(miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CompareObjectType(result, scratch, scratch, MAP_TYPE);
- bne(&done);
-
- // Get the prototype from the initial map.
- LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- bind(&done);
-}
-
-
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index b43dab711a..a1d2932f43 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -635,12 +635,10 @@ class MacroAssembler : public Assembler {
void IsObjectNameType(Register object, Register scratch, Label* fail);
- // ---------------------------------------------------------------------------
- // Debugger Support
-
void DebugBreak();
+ // Frame restart support
+ void MaybeDropFrames();
- // ---------------------------------------------------------------------------
// Exception handling
// Push a new stack handler and link into stack handler chain.
@@ -751,14 +749,6 @@ class MacroAssembler : public Assembler {
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss);
-
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -1503,10 +1493,6 @@ class MacroAssembler : public Assembler {
bool* definitely_mismatches, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void InitializeNewString(Register string, Register length,
- Heap::RootListIndex map_index, Register scratch1,
- Register scratch2);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object, Register scratch,
Condition cond, // eq for new space, ne otherwise.
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index e3761579b0..058632847d 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -1623,7 +1623,8 @@ void Simulator::ExecuteBranchConditional(Instruction* instr, BCType type) {
// Handle execution based on instruction types.
void Simulator::ExecuteExt1(Instruction* instr) {
- switch (instr->Bits(10, 1) << 1) {
+ uint32_t opcode = EXT1 | instr->BitField(10, 1);
+ switch (opcode) {
case MCRF:
UNIMPLEMENTED(); // Not used by V8.
case BCLRX:
@@ -1678,7 +1679,7 @@ void Simulator::ExecuteExt1(Instruction* instr) {
bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
bool found = true;
- int opcode = instr->Bits(10, 1) << 1;
+ uint32_t opcode = EXT2 | instr->BitField(10, 1);
switch (opcode) {
case SRWX: {
int rs = instr->RSValue();
@@ -1949,7 +1950,7 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
if (found) return found;
found = true;
- opcode = instr->Bits(10, 2) << 2;
+ opcode = EXT2 | instr->BitField(10, 2);
switch (opcode) {
case SRADIX: {
int ra = instr->RAValue();
@@ -1976,7 +1977,7 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
bool found = true;
- int opcode = instr->Bits(9, 1) << 1;
+ uint32_t opcode = EXT2 | instr->BitField(9, 1);
switch (opcode) {
case TW: {
// used for call redirection in simulation mode
@@ -2234,7 +2235,7 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
bool Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
bool found = true;
- int opcode = instr->Bits(9, 1) << 1;
+ uint32_t opcode = EXT2 | instr->BitField(9, 1);
switch (opcode) {
case CNTLZWX: {
int rs = instr->RSValue();
@@ -2752,7 +2753,7 @@ bool Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
void Simulator::ExecuteExt2_5bit(Instruction* instr) {
- int opcode = instr->Bits(5, 1) << 1;
+ uint32_t opcode = EXT2 | instr->BitField(5, 1);
switch (opcode) {
case ISEL: {
int rt = instr->RTValue();
@@ -2785,9 +2786,9 @@ void Simulator::ExecuteExt2(Instruction* instr) {
void Simulator::ExecuteExt3(Instruction* instr) {
- int opcode = instr->Bits(10, 1) << 1;
+ uint32_t opcode = EXT3 | instr->BitField(10, 1);
switch (opcode) {
- case FCFID: {
+ case FCFIDS: {
// fcfids
int frt = instr->RTValue();
int frb = instr->RBValue();
@@ -2796,7 +2797,7 @@ void Simulator::ExecuteExt3(Instruction* instr) {
set_d_register_from_double(frt, frt_val);
return;
}
- case FCFIDU: {
+ case FCFIDUS: {
// fcfidus
int frt = instr->RTValue();
int frb = instr->RBValue();
@@ -2811,7 +2812,8 @@ void Simulator::ExecuteExt3(Instruction* instr) {
void Simulator::ExecuteExt4(Instruction* instr) {
- switch (instr->Bits(5, 1) << 1) {
+ uint32_t opcode = EXT4 | instr->BitField(5, 1);
+ switch (opcode) {
case FDIV: {
int frt = instr->RTValue();
int fra = instr->RAValue();
@@ -2898,7 +2900,7 @@ void Simulator::ExecuteExt4(Instruction* instr) {
return;
}
}
- int opcode = instr->Bits(10, 1) << 1;
+ opcode = EXT4 | instr->BitField(10, 1);
switch (opcode) {
case FCMPU: {
int fra = instr->RAValue();
@@ -3236,7 +3238,8 @@ void Simulator::ExecuteExt4(Instruction* instr) {
#if V8_TARGET_ARCH_PPC64
void Simulator::ExecuteExt5(Instruction* instr) {
- switch (instr->Bits(4, 2) << 2) {
+ uint32_t opcode = EXT5 | instr->BitField(4, 2);
+ switch (opcode) {
case RLDICL: {
int ra = instr->RAValue();
int rs = instr->RSValue();
@@ -3324,7 +3327,8 @@ void Simulator::ExecuteExt5(Instruction* instr) {
return;
}
}
- switch (instr->Bits(4, 1) << 1) {
+ opcode = EXT5 | instr->BitField(4, 1);
+ switch (opcode) {
case RLDCL: {
int ra = instr->RAValue();
int rs = instr->RSValue();
@@ -3350,7 +3354,8 @@ void Simulator::ExecuteExt5(Instruction* instr) {
#endif
void Simulator::ExecuteExt6(Instruction* instr) {
- switch (instr->Bits(10, 3) << 3) {
+ uint32_t opcode = EXT6 | instr->BitField(10, 3);
+ switch (opcode) {
case XSADDDP: {
int frt = instr->RTValue();
int fra = instr->RAValue();
@@ -3396,7 +3401,7 @@ void Simulator::ExecuteExt6(Instruction* instr) {
}
void Simulator::ExecuteGeneric(Instruction* instr) {
- int opcode = instr->OpcodeValue() << 26;
+ uint32_t opcode = instr->OpcodeField();
switch (opcode) {
case SUBFIC: {
int rt = instr->RTValue();
@@ -3963,7 +3968,7 @@ void Simulator::ExecuteInstruction(Instruction* instr) {
if (::v8::internal::FLAG_trace_sim) {
Trace(instr);
}
- int opcode = instr->OpcodeValue() << 26;
+ uint32_t opcode = instr->OpcodeField();
if (opcode == TWI) {
SoftwareInterrupt(instr);
} else {
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 6821ba64ca..85f9d5e475 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -277,6 +277,21 @@ void CpuProfiler::ResetProfiles() {
profiles_->set_cpu_profiler(this);
}
+void CpuProfiler::CreateEntriesForRuntimeCallStats() {
+ static_entries_.clear();
+ RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
+ CodeMap* code_map = generator_->code_map();
+ for (int i = 0; i < RuntimeCallStats::counters_count; ++i) {
+ RuntimeCallCounter* counter = &(rcs->*(RuntimeCallStats::counters[i]));
+ DCHECK(counter->name());
+ std::unique_ptr<CodeEntry> entry(
+ new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
+ CodeEntry::kEmptyNamePrefix, "native V8Runtime"));
+ code_map->AddCode(reinterpret_cast<Address>(counter), entry.get(), 1);
+ static_entries_.push_back(std::move(entry));
+ }
+}
+
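Note the trick here: each RuntimeCallCounter's own address is registered in the code map as a one-byte code region, so a tick attributed to that address resolves to a CodeEntry named after the counter. A sketch of the lookup this enables (assuming CodeMap's usual address-based FindEntry interface):

    // code_map->AddCode(reinterpret_cast<Address>(counter), entry.get(), 1)
    // means a later code_map->FindEntry(reinterpret_cast<Address>(counter))
    // returns the entry labelled counter->name() / "native V8Runtime".

Ownership of these entries now lives in static_entries_ on CpuProfiler, matching the removal of the equivalent logic from ProfileGenerator's constructor further down.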
void CpuProfiler::CollectSample() {
if (processor_) {
processor_->AddCurrentStack(isolate_);
@@ -305,9 +320,10 @@ void CpuProfiler::StartProcessorIfNotStarted() {
// Disable logging when using the new implementation.
saved_is_logging_ = logger->is_logging_;
logger->is_logging_ = false;
- generator_.reset(new ProfileGenerator(isolate_, profiles_.get()));
+ generator_.reset(new ProfileGenerator(profiles_.get()));
processor_.reset(new ProfilerEventsProcessor(isolate_, generator_.get(),
sampling_interval_));
+ CreateEntriesForRuntimeCallStats();
logger->SetUpProfilerListener();
ProfilerListener* profiler_listener = logger->profiler_listener();
profiler_listener->AddObserver(this);
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index fa31754a6f..a6872e4986 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -220,12 +220,14 @@ class CpuProfiler : public CodeEventObserver {
void StopProcessor();
void ResetProfiles();
void LogBuiltins();
+ void CreateEntriesForRuntimeCallStats();
Isolate* const isolate_;
base::TimeDelta sampling_interval_;
std::unique_ptr<CpuProfilesCollection> profiles_;
std::unique_ptr<ProfileGenerator> generator_;
std::unique_ptr<ProfilerEventsProcessor> processor_;
+ std::vector<std::unique_ptr<CodeEntry>> static_entries_;
bool saved_is_logging_;
bool is_profiling_;
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 2df28a7958..938bb12424 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -6,6 +6,7 @@
#include "src/api.h"
#include "src/debug/debug.h"
+#include "src/heap/heap-inl.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
#include "src/profiler/sampling-heap-profiler.h"
@@ -16,9 +17,8 @@ namespace internal {
HeapProfiler::HeapProfiler(Heap* heap)
: ids_(new HeapObjectsMap(heap)),
names_(new StringsStorage(heap)),
- is_tracking_object_moves_(false) {
-}
-
+ is_tracking_object_moves_(false),
+ get_retainer_infos_callback_(nullptr) {}
static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
delete *snapshot_ptr;
@@ -61,6 +61,19 @@ v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
class_id, Utils::ToLocal(Handle<Object>(wrapper)));
}
+void HeapProfiler::SetGetRetainerInfosCallback(
+ v8::HeapProfiler::GetRetainerInfosCallback callback) {
+ get_retainer_infos_callback_ = callback;
+}
+
+v8::HeapProfiler::RetainerInfos HeapProfiler::GetRetainerInfos(
+ Isolate* isolate) {
+ v8::HeapProfiler::RetainerInfos infos;
+ if (get_retainer_infos_callback_ != nullptr)
+ infos =
+ get_retainer_infos_callback_(reinterpret_cast<v8::Isolate*>(isolate));
+ return infos;
+}
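For context, this is the internal half of the new embedder hook; a minimal embedder-side callback would look roughly like the following (a sketch; the groups/edges members are assumed from their use in FillRetainedObjects and FillEdges below):

    // Hypothetical embedder callback matching
    // v8::HeapProfiler::GetRetainerInfosCallback:
    v8::HeapProfiler::RetainerInfos GetMyRetainerInfos(v8::Isolate* isolate) {
      v8::HeapProfiler::RetainerInfos infos;
      // infos.groups maps a v8::RetainedObjectInfo* to the persistent
      // handles it retains; infos.edges lists (parent, child) persistent
      // handle pairs to be reported as "native" references.
      return infos;
    }

This replaces the ObjectGroup / ImplicitRefGroup machinery removed from the snapshot generator below.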
HeapSnapshot* HeapProfiler::TakeSnapshot(
v8::ActivityControl* control,
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 3e1dcb54f9..a10cb9228f 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -66,6 +66,11 @@ class HeapProfiler {
Object** wrapper);
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+ void SetGetRetainerInfosCallback(
+ v8::HeapProfiler::GetRetainerInfosCallback callback);
+
+ v8::HeapProfiler::RetainerInfos GetRetainerInfos(Isolate* isolate);
+
bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
bool is_tracking_allocations() const { return !!allocation_tracker_; }
@@ -86,6 +91,7 @@ class HeapProfiler {
bool is_tracking_object_moves_;
base::Mutex profiler_mutex_;
std::unique_ptr<SamplingHeapProfiler> sampling_heap_profiler_;
+ v8::HeapProfiler::GetRetainerInfosCallback get_retainer_infos_callback_;
DISALLOW_COPY_AND_ASSIGN(HeapProfiler);
};
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 9b6e809a04..b7b97a8320 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -4,13 +4,20 @@
#include "src/profiler/heap-snapshot-generator.h"
+#include <utility>
+
+#include "src/api.h"
#include "src/code-stubs.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
+#include "src/layout-descriptor.h"
#include "src/objects-body-descriptors.h"
+#include "src/objects-inl.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
+#include "src/prototype.h"
+#include "src/transitions.h"
namespace v8 {
namespace internal {
@@ -152,7 +159,6 @@ const char* HeapEntry::TypeAsString() {
case kConsString: return "/concatenated string/";
case kSlicedString: return "/sliced string/";
case kSymbol: return "/symbol/";
- case kSimdValue: return "/simd/";
default: return "???";
}
}
@@ -836,8 +842,6 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
return AddEntry(object, HeapEntry::kArray, "");
} else if (object->IsHeapNumber()) {
return AddEntry(object, HeapEntry::kHeapNumber, "number");
- } else if (object->IsSimd128Value()) {
- return AddEntry(object, HeapEntry::kSimdValue, "simd");
}
return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
}
@@ -1032,8 +1036,6 @@ bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
} else if (obj->IsCode()) {
ExtractCodeReferences(entry, Code::cast(obj));
- } else if (obj->IsBox()) {
- ExtractBoxReferences(entry, Box::cast(obj));
} else if (obj->IsCell()) {
ExtractCellReferences(entry, Cell::cast(obj));
} else if (obj->IsWeakCell()) {
@@ -1110,9 +1112,11 @@ void V8HeapExplorer::ExtractJSObjectReferences(
}
}
SharedFunctionInfo* shared_info = js_fun->shared();
- TagObject(js_fun->literals(), "(function literals)");
- SetInternalReference(js_fun, entry, "literals", js_fun->literals(),
- JSFunction::kLiteralsOffset);
+ TagObject(js_fun->feedback_vector_cell(),
+ "(function feedback vector cell)");
+ SetInternalReference(js_fun, entry, "feedback_vector_cell",
+ js_fun->feedback_vector_cell(),
+ JSFunction::kFeedbackVectorOffset);
TagObject(shared_info, "(shared function info)");
SetInternalReference(js_fun, entry,
"shared", shared_info,
@@ -1165,6 +1169,10 @@ void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
SlicedString* ss = SlicedString::cast(string);
SetInternalReference(ss, entry, "parent", ss->parent(),
SlicedString::kParentOffset);
+ } else if (string->IsThinString()) {
+ ThinString* ts = ThinString::cast(string);
+ SetInternalReference(ts, entry, "actual", ts->actual(),
+ ThinString::kActualOffset);
}
}
@@ -1447,10 +1455,6 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
Code::kGCMetadataOffset);
}
-void V8HeapExplorer::ExtractBoxReferences(int entry, Box* box) {
- SetInternalReference(box, entry, "value", box->value(), Box::kValueOffset);
-}
-
void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
SetInternalReference(cell, entry, "value", cell->value(), Cell::kValueOffset);
}
@@ -1819,7 +1823,6 @@ bool V8HeapExplorer::IsEssentialObject(Object* object) {
object != heap_->empty_byte_array() &&
object != heap_->empty_fixed_array() &&
object != heap_->empty_descriptor_array() &&
- object != heap_->empty_feedback_vector() &&
object != heap_->fixed_array_map() && object != heap_->cell_map() &&
object != heap_->global_property_cell_map() &&
object != heap_->shared_function_info_map() &&
@@ -2283,55 +2286,52 @@ int NativeObjectsExplorer::EstimateObjectsCount() {
void NativeObjectsExplorer::FillRetainedObjects() {
if (embedder_queried_) return;
- Isolate* isolate = isolate_;
- const GCType major_gc_type = kGCTypeMarkSweepCompact;
- // Record objects that are joined into ObjectGroups.
- isolate->heap()->CallGCPrologueCallbacks(
- major_gc_type, kGCCallbackFlagConstructRetainedObjectInfos);
- List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
- for (int i = 0; i < groups->length(); ++i) {
- ObjectGroup* group = groups->at(i);
- if (group->info == NULL) continue;
- List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info);
- for (size_t j = 0; j < group->length; ++j) {
- HeapObject* obj = HeapObject::cast(*group->objects[j]);
- list->Add(obj);
- in_groups_.Insert(obj);
+ v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate_));
+ v8::HeapProfiler::RetainerInfos infos =
+ snapshot_->profiler()->GetRetainerInfos(isolate_);
+ for (auto& pair : infos.groups) {
+ List<HeapObject*>* list = GetListMaybeDisposeInfo(pair.first);
+ for (auto& persistent : pair.second) {
+ if (persistent->IsEmpty()) continue;
+
+ Handle<Object> object = v8::Utils::OpenHandle(
+ *persistent->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
+ DCHECK(!object.is_null());
+ HeapObject* heap_object = HeapObject::cast(*object);
+ list->Add(heap_object);
+ in_groups_.Insert(heap_object);
}
- group->info = NULL; // Acquire info object ownership.
}
- isolate->global_handles()->RemoveObjectGroups();
- isolate->heap()->CallGCEpilogueCallbacks(major_gc_type, kNoGCCallbackFlags);
+
// Record objects that are not in ObjectGroups, but have class ID.
GlobalHandlesExtractor extractor(this);
- isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
+ isolate_->global_handles()->IterateAllRootsWithClassIds(&extractor);
+
+ edges_ = std::move(infos.edges);
embedder_queried_ = true;
}
+void NativeObjectsExplorer::FillEdges() {
+ v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate_));
+ // Fill in actual edges found.
+ for (auto& pair : edges_) {
+ if (pair.first->IsEmpty() || pair.second->IsEmpty()) continue;
-void NativeObjectsExplorer::FillImplicitReferences() {
- Isolate* isolate = isolate_;
- List<ImplicitRefGroup*>* groups =
- isolate->global_handles()->implicit_ref_groups();
- for (int i = 0; i < groups->length(); ++i) {
- ImplicitRefGroup* group = groups->at(i);
- HeapObject* parent = *group->parent;
+ Handle<Object> parent_object = v8::Utils::OpenHandle(
+ *pair.first->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
+ HeapObject* parent = HeapObject::cast(*parent_object);
int parent_entry =
filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
DCHECK(parent_entry != HeapEntry::kNoEntry);
- Object*** children = group->children;
- for (size_t j = 0; j < group->length; ++j) {
- Object* child = *children[j];
- HeapEntry* child_entry =
- filler_->FindOrAddEntry(child, native_entries_allocator_);
- filler_->SetNamedReference(
- HeapGraphEdge::kInternal,
- parent_entry,
- "native",
- child_entry);
- }
+ Handle<Object> child_object = v8::Utils::OpenHandle(
+ *pair.second->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
+ HeapObject* child = HeapObject::cast(*child_object);
+ HeapEntry* child_entry =
+ filler_->FindOrAddEntry(child, native_entries_allocator_);
+ filler_->SetNamedReference(HeapGraphEdge::kInternal, parent_entry, "native",
+ child_entry);
}
- isolate->global_handles()->RemoveImplicitRefGroups();
+ edges_.clear();
}
List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
@@ -2351,7 +2351,7 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
SnapshotFiller* filler) {
filler_ = filler;
FillRetainedObjects();
- FillImplicitReferences();
+ FillEdges();
if (EstimateObjectsCount() > 0) {
for (base::HashMap::Entry* p = objects_by_info_.Start(); p != NULL;
p = objects_by_info_.Next(p)) {
@@ -2488,6 +2488,20 @@ HeapSnapshotGenerator::HeapSnapshotGenerator(
heap_(heap) {
}
+namespace {
+class NullContextScope {
+ public:
+ explicit NullContextScope(Isolate* isolate)
+ : isolate_(isolate), prev_(isolate->context()) {
+ isolate_->set_context(nullptr);
+ }
+ ~NullContextScope() { isolate_->set_context(prev_); }
+
+ private:
+ Isolate* isolate_;
+ Context* prev_;
+};
+} // namespace
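NullContextScope is a plain RAII guard around Isolate::set_context; its one use, added just below, amounts to:

    {
      NullContextScope null_context_scope(heap_->isolate());
      // ... snapshot generation runs with isolate->context() == nullptr ...
    }  // destructor restores the previous context

clearing the current context for the duration of snapshot generation.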
bool HeapSnapshotGenerator::GenerateSnapshot() {
v8_heap_explorer_.TagGlobalObjects();
@@ -2501,6 +2515,8 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
GarbageCollectionReason::kHeapProfiler);
+ NullContextScope null_context_scope(heap_->isolate());
+
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
if (FLAG_verify_heap) {
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index b4de8b57e1..022f238cc5 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -92,8 +92,7 @@ class HeapEntry BASE_EMBEDDED {
kSynthetic = v8::HeapGraphNode::kSynthetic,
kConsString = v8::HeapGraphNode::kConsString,
kSlicedString = v8::HeapGraphNode::kSlicedString,
- kSymbol = v8::HeapGraphNode::kSymbol,
- kSimdValue = v8::HeapGraphNode::kSimdValue
+ kSymbol = v8::HeapGraphNode::kSymbol
};
static const int kNoEntry;
@@ -386,7 +385,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractAccessorInfoReferences(int entry, AccessorInfo* accessor_info);
void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
void ExtractCodeReferences(int entry, Code* code);
- void ExtractBoxReferences(int entry, Box* box);
void ExtractCellReferences(int entry, Cell* cell);
void ExtractWeakCellReferences(int entry, WeakCell* weak_cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
@@ -496,7 +494,7 @@ class NativeObjectsExplorer {
private:
void FillRetainedObjects();
- void FillImplicitReferences();
+ void FillEdges();
List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
void SetNativeRootReference(v8::RetainedObjectInfo* info);
void SetRootNativeRootsReference();
@@ -532,6 +530,7 @@ class NativeObjectsExplorer {
HeapEntriesAllocator* native_entries_allocator_;
// Used during references extraction.
SnapshotFiller* filler_;
+ v8::HeapProfiler::RetainerEdges edges_;
static HeapThing const kNativesRootObject;
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 72e02b360b..742d368390 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -8,6 +8,7 @@
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
+#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/tracing/trace-event.h"
@@ -635,18 +636,8 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
current_profiles_semaphore_.Signal();
}
-ProfileGenerator::ProfileGenerator(Isolate* isolate,
- CpuProfilesCollection* profiles)
- : isolate_(isolate), profiles_(profiles) {
- RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
- for (int i = 0; i < RuntimeCallStats::counters_count; ++i) {
- RuntimeCallCounter* counter = &(rcs->*(RuntimeCallStats::counters[i]));
- DCHECK(counter->name());
- auto entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
- CodeEntry::kEmptyNamePrefix, "native V8Runtime");
- code_map_.AddCode(reinterpret_cast<Address>(counter), entry, 1);
- }
-}
+ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
+ : profiles_(profiles) {}
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
std::vector<CodeEntry*> entries;
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 1b3cad6dc3..c108fbd61b 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -368,7 +368,7 @@ class CpuProfilesCollection {
class ProfileGenerator {
public:
- ProfileGenerator(Isolate* isolate, CpuProfilesCollection* profiles);
+ explicit ProfileGenerator(CpuProfilesCollection* profiles);
void RecordTickSample(const TickSample& sample);
@@ -378,7 +378,6 @@ class ProfileGenerator {
CodeEntry* FindEntry(void* address);
CodeEntry* EntryForVMState(StateTag tag);
- Isolate* isolate_;
CpuProfilesCollection* profiles_;
CodeMap code_map_;
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 4de524aeef..bacfffaa73 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -5,6 +5,7 @@
#include "src/profiler/profiler-listener.h"
#include "src/deoptimizer.h"
+#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/source-position-table.h"
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.cc b/deps/v8/src/profiler/tracing-cpu-profiler.cc
index 8b31225905..a9b84b6634 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.cc
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.cc
@@ -8,9 +8,6 @@
#include "src/tracing/trace-event.h"
#include "src/v8.h"
-#define PROFILER_TRACE_CATEGORY_ENABLED(cat) \
- (*TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT(cat)))
-
namespace v8 {
std::unique_ptr<TracingCpuProfiler> TracingCpuProfiler::Create(
@@ -25,8 +22,9 @@ namespace internal {
TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate)
: isolate_(isolate), profiling_enabled_(false) {
// Make sure tracing system notices profiler categories.
- PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler");
- PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler.hires");
+ TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"));
+ TRACE_EVENT_WARMUP_CATEGORY(
+ TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires"));
V8::GetCurrentPlatform()->AddTraceStateObserver(this);
}
@@ -36,7 +34,10 @@ TracingCpuProfilerImpl::~TracingCpuProfilerImpl() {
}
void TracingCpuProfilerImpl::OnTraceEnabled() {
- if (!PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler")) return;
+ bool enabled;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"), &enabled);
+ if (!enabled) return;
profiling_enabled_ = true;
isolate_->RequestInterrupt(
[](v8::Isolate*, void* data) {
@@ -59,8 +60,10 @@ void TracingCpuProfilerImpl::OnTraceDisabled() {
void TracingCpuProfilerImpl::StartProfiling() {
base::LockGuard<base::Mutex> lock(&mutex_);
if (!profiling_enabled_ || profiler_) return;
- int sampling_interval_us =
- PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler.hires") ? 100 : 1000;
+ bool enabled;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires"), &enabled);
+ int sampling_interval_us = enabled ? 100 : 1000;
profiler_.reset(new CpuProfiler(isolate_));
profiler_->set_sampling_interval(
base::TimeDelta::FromMicroseconds(sampling_interval_us));
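The rewrite above replaces the local PROFILER_TRACE_CATEGORY_ENABLED macro with the stock TRACE_EVENT_CATEGORY_GROUP_ENABLED, which reports the category state through an out-parameter, and then derives the sampling interval from it: 100 microseconds when the high-resolution category is enabled, 1000 otherwise. A small sketch of that selection logic under a toy category registry; CategoryGroupEnabled here is a stand-in for the macro, not the tracing API:

#include <cassert>
#include <cstring>

// Toy category registry standing in for the tracing runtime.
static bool g_hires_enabled = false;

static void CategoryGroupEnabled(const char* category, bool* enabled) {
  *enabled = (std::strcmp(category, "v8.cpu_profiler.hires") == 0) &&
             g_hires_enabled;
}

// Mirrors the interval choice in TracingCpuProfilerImpl::StartProfiling.
static int ChooseSamplingIntervalUs() {
  bool enabled;
  CategoryGroupEnabled("v8.cpu_profiler.hires", &enabled);
  return enabled ? 100 : 1000;  // microseconds
}

int main() {
  assert(ChooseSamplingIntervalUs() == 1000);  // default resolution
  g_hires_enabled = true;
  assert(ChooseSamplingIntervalUs() == 100);   // high-resolution tracing
  return 0;
}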
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index d616ae76e1..6e9184d39e 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -7,6 +7,8 @@
#include "include/v8.h"
#include "src/allocation.h"
+// TODO(ishell): remove once FLAG_track_constant_fields is removed.
+#include "src/flags.h"
#include "src/utils.h"
namespace v8 {
@@ -72,6 +74,14 @@ enum PropertyKind { kData = 0, kAccessor = 1 };
// Must fit in the BitField PropertyDetails::LocationField.
enum PropertyLocation { kField = 0, kDescriptor = 1 };
+// Order of modes is significant.
+// Must fit in the BitField PropertyDetails::ConstnessField.
+enum PropertyConstness { kMutable = 0, kConst = 1 };
+
+// TODO(ishell): remove once constant field tracking is done.
+const PropertyConstness kDefaultFieldConstness =
+ FLAG_track_constant_fields ? kConst : kMutable;
+
class Representation {
public:
enum Kind {
@@ -231,10 +241,11 @@ class PropertyDetails BASE_EMBEDDED {
// Property details for fast mode properties.
PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
- PropertyLocation location, Representation representation,
- int field_index = 0) {
- value_ = KindField::encode(kind) | LocationField::encode(location) |
- AttributesField::encode(attributes) |
+ PropertyLocation location, PropertyConstness constness,
+ Representation representation, int field_index = 0) {
+ value_ = KindField::encode(kind) | AttributesField::encode(attributes) |
+ LocationField::encode(location) |
+ ConstnessField::encode(constness) |
RepresentationField::encode(EncodeRepresentation(representation)) |
FieldIndexField::encode(field_index);
}
@@ -265,6 +276,9 @@ class PropertyDetails BASE_EMBEDDED {
PropertyDetails CopyWithRepresentation(Representation representation) const {
return PropertyDetails(value_, representation);
}
+ PropertyDetails CopyWithConstness(PropertyConstness constness) const {
+ return PropertyDetails(value_, constness);
+ }
PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) const {
new_attributes =
static_cast<PropertyAttributes>(attributes() | new_attributes);
@@ -285,6 +299,7 @@ class PropertyDetails BASE_EMBEDDED {
PropertyKind kind() const { return KindField::decode(value_); }
PropertyLocation location() const { return LocationField::decode(value_); }
+ PropertyConstness constness() const { return ConstnessField::decode(value_); }
PropertyAttributes attributes() const {
return AttributesField::decode(value_);
@@ -317,22 +332,30 @@ class PropertyDetails BASE_EMBEDDED {
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
class KindField : public BitField<PropertyKind, 0, 1> {};
- class LocationField : public BitField<PropertyLocation, 1, 1> {};
- class AttributesField : public BitField<PropertyAttributes, 2, 3> {};
+ class LocationField : public BitField<PropertyLocation, KindField::kNext, 1> {
+ };
+ class ConstnessField
+ : public BitField<PropertyConstness, LocationField::kNext, 1> {};
+ class AttributesField
+ : public BitField<PropertyAttributes, ConstnessField::kNext, 3> {};
static const int kAttributesReadOnlyMask =
(READ_ONLY << AttributesField::kShift);
// Bit fields for normalized objects.
- class PropertyCellTypeField : public BitField<PropertyCellType, 5, 2> {};
- class DictionaryStorageField : public BitField<uint32_t, 7, 24> {};
+ class PropertyCellTypeField
+ : public BitField<PropertyCellType, AttributesField::kNext, 2> {};
+ class DictionaryStorageField
+ : public BitField<uint32_t, PropertyCellTypeField::kNext, 23> {};
// Bit fields for fast objects.
- class RepresentationField : public BitField<uint32_t, 5, 4> {};
+ class RepresentationField
+ : public BitField<uint32_t, AttributesField::kNext, 4> {};
class DescriptorPointer
- : public BitField<uint32_t, 9, kDescriptorIndexBitCount> {}; // NOLINT
- class FieldIndexField
- : public BitField<uint32_t, 9 + kDescriptorIndexBitCount,
+ : public BitField<uint32_t, RepresentationField::kNext,
kDescriptorIndexBitCount> {}; // NOLINT
+ class FieldIndexField : public BitField<uint32_t, DescriptorPointer::kNext,
+ kDescriptorIndexBitCount> {
+ }; // NOLINT
// All bits for both fast and slow objects must fit in a smi.
STATIC_ASSERT(DictionaryStorageField::kNext <= 31);
@@ -366,6 +389,9 @@ class PropertyDetails BASE_EMBEDDED {
value_ = RepresentationField::update(
value, EncodeRepresentation(representation));
}
+ PropertyDetails(int value, PropertyConstness constness) {
+ value_ = ConstnessField::update(value, constness);
+ }
PropertyDetails(int value, PropertyAttributes attributes) {
value_ = AttributesField::update(value, attributes);
}
@@ -373,6 +399,22 @@ class PropertyDetails BASE_EMBEDDED {
uint32_t value_;
};
+// kField location is more general than kDescriptor; kDescriptor generalizes
+// only to itself.
+inline bool IsGeneralizableTo(PropertyLocation a, PropertyLocation b) {
+ return b == kField || a == kDescriptor;
+}
+
+// kMutable constness is more general than kConst; kConst generalizes only to
+// itself.
+inline bool IsGeneralizableTo(PropertyConstness a, PropertyConstness b) {
+ return b == kMutable || a == kConst;
+}
+
+inline PropertyConstness GeneralizeConstness(PropertyConstness a,
+ PropertyConstness b) {
+ return a == kMutable ? kMutable : b;
+}
std::ostream& operator<<(std::ostream& os,
const PropertyAttributes& attributes);
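The IsGeneralizableTo overloads and GeneralizeConstness above define a tiny two-level lattice: kField and kMutable are the general elements, kDescriptor and kConst may only move toward them and never back, and the join of two constness values is kConst only when both inputs are kConst. The following self-contained restatement copies the definitions from this hunk and spot-checks them:

#include <cassert>

enum PropertyLocation { kField = 0, kDescriptor = 1 };
enum PropertyConstness { kMutable = 0, kConst = 1 };

// a generalizes to b iff b is at least as general as a.
inline bool IsGeneralizableTo(PropertyLocation a, PropertyLocation b) {
  return b == kField || a == kDescriptor;
}

inline bool IsGeneralizableTo(PropertyConstness a, PropertyConstness b) {
  return b == kMutable || a == kConst;
}

// The join of two constness values: any kMutable input wins.
inline PropertyConstness GeneralizeConstness(PropertyConstness a,
                                             PropertyConstness b) {
  return a == kMutable ? kMutable : b;
}

int main() {
  // kDescriptor may generalize to kField, but not the other way around.
  assert(IsGeneralizableTo(kDescriptor, kField));
  assert(!IsGeneralizableTo(kField, kDescriptor));

  // kConst may generalize to kMutable; kMutable only to itself.
  assert(IsGeneralizableTo(kConst, kMutable));
  assert(!IsGeneralizableTo(kMutable, kConst));

  // The join is kConst only when both inputs are kConst.
  assert(GeneralizeConstness(kConst, kConst) == kConst);
  assert(GeneralizeConstness(kConst, kMutable) == kMutable);
  assert(GeneralizeConstness(kMutable, kConst) == kMutable);
  return 0;
}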
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 1c2666b00a..339076dbb4 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -25,13 +25,39 @@ std::ostream& operator<<(std::ostream& os,
Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
PropertyAttributes attributes,
Representation representation) {
- return DataField(key, field_index, FieldType::Any(key->GetIsolate()),
- attributes, representation);
+ return DataField(key, field_index, attributes, kMutable, representation,
+ FieldType::Any(key->GetIsolate()));
+}
+
+Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
+ PropertyAttributes attributes,
+ PropertyConstness constness,
+ Representation representation,
+ Handle<Object> wrapped_field_type) {
+ DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeakCell());
+ PropertyDetails details(kData, attributes, kField, constness, representation,
+ field_index);
+ return Descriptor(key, wrapped_field_type, details);
+}
+
+Descriptor Descriptor::DataConstant(Handle<Name> key, int field_index,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ if (FLAG_track_constant_fields) {
+ Handle<Object> any_type(FieldType::Any(), key->GetIsolate());
+ return DataField(key, field_index, attributes, kConst,
+ Representation::Tagged(), any_type);
+
+ } else {
+ return Descriptor(key, value, kData, attributes, kDescriptor, kConst,
+ value->OptimalRepresentation(), field_index);
+ }
}
// Outputs PropertyDetails as a dictionary details.
void PropertyDetails::PrintAsSlowTo(std::ostream& os) {
os << "(";
+ if (constness() == kConst) os << "const ";
os << (kind() == kData ? "data" : "accessor");
os << ", dictionary_index: " << dictionary_index();
os << ", attrs: " << attributes() << ")";
@@ -40,6 +66,7 @@ void PropertyDetails::PrintAsSlowTo(std::ostream& os) {
// Outputs PropertyDetails as a descriptor array details.
void PropertyDetails::PrintAsFastTo(std::ostream& os, PrintMode mode) {
os << "(";
+ if (constness() == kConst) os << "const ";
os << (kind() == kData ? "data" : "accessor");
if (location() == kField) {
os << " field";
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 177f06b769..ab183d9e9f 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -33,24 +33,25 @@ class Descriptor final BASE_EMBEDDED {
Representation representation);
static Descriptor DataField(Handle<Name> key, int field_index,
- Handle<Object> wrapped_field_type,
PropertyAttributes attributes,
- Representation representation) {
- DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeakCell());
- return Descriptor(key, wrapped_field_type, kData, attributes, kField,
- representation, field_index);
- }
+ PropertyConstness constness,
+ Representation representation,
+ Handle<Object> wrapped_field_type);
static Descriptor DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes) {
- return Descriptor(key, value, kData, attributes, kDescriptor,
- value->OptimalRepresentation());
+ return Descriptor(key, value, kData, attributes, kDescriptor, kConst,
+ value->OptimalRepresentation(), 0);
}
+ static Descriptor DataConstant(Handle<Name> key, int field_index,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
static Descriptor AccessorConstant(Handle<Name> key, Handle<Object> foreign,
PropertyAttributes attributes) {
- return Descriptor(key, foreign, kAccessor, attributes, kDescriptor,
- Representation::Tagged());
+ return Descriptor(key, foreign, kAccessor, attributes, kDescriptor, kConst,
+ Representation::Tagged(), 0);
}
private:
@@ -75,10 +76,12 @@ class Descriptor final BASE_EMBEDDED {
Descriptor(Handle<Name> key, Handle<Object> value, PropertyKind kind,
PropertyAttributes attributes, PropertyLocation location,
- Representation representation, int field_index = 0)
+ PropertyConstness constness, Representation representation,
+ int field_index)
: key_(key),
value_(value),
- details_(kind, attributes, location, representation, field_index) {
+ details_(kind, attributes, location, constness, representation,
+ field_index) {
DCHECK(key->IsUniqueName());
DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
}
diff --git a/deps/v8/src/regexp/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
index ca7a9fe991..4bcda43496 100644
--- a/deps/v8/src/regexp/jsregexp-inl.h
+++ b/deps/v8/src/regexp/jsregexp-inl.h
@@ -7,7 +7,6 @@
#define V8_REGEXP_JSREGEXP_INL_H_
#include "src/allocation.h"
-#include "src/handles.h"
#include "src/heap/heap.h"
#include "src/objects.h"
#include "src/regexp/jsregexp.h"
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 8b21459059..0ed3086ce6 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -1106,12 +1106,11 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
RegExpNode* start,
int capture_count,
Handle<String> pattern) {
- Heap* heap = pattern->GetHeap();
+ Isolate* isolate = pattern->GetHeap()->isolate();
#ifdef DEBUG
if (FLAG_trace_regexp_assembler)
- macro_assembler_ =
- new RegExpMacroAssemblerTracer(isolate(), macro_assembler);
+ macro_assembler_ = new RegExpMacroAssemblerTracer(isolate, macro_assembler);
else
#endif
macro_assembler_ = macro_assembler;
@@ -1135,11 +1134,11 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
}
Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
- heap->IncreaseTotalRegexpCodeGenerated(code->Size());
+ isolate->IncreaseTotalRegexpCodeGenerated(code->Size());
work_list_ = NULL;
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code) {
- CodeTracer::Scope trace_scope(heap->isolate()->GetCodeTracer());
+ CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
OFStream os(trace_scope.file());
Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
}
@@ -6761,8 +6760,9 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
bool RegExpEngine::TooMuchRegExpCode(Handle<String> pattern) {
Heap* heap = pattern->GetHeap();
bool too_much = pattern->length() > RegExpImpl::kRegExpTooLargeToOptimize;
- if (heap->total_regexp_code_generated() > RegExpImpl::kRegExpCompiledLimit &&
- heap->memory_allocator()->SizeExecutable() >
+ if (heap->isolate()->total_regexp_code_generated() >
+ RegExpImpl::kRegExpCompiledLimit &&
+ heap->CommittedMemoryExecutable() >
RegExpImpl::kRegExpExecutableMemoryLimit) {
too_much = true;
}
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index b2e84ba4f3..77d61ae17e 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -158,7 +158,7 @@ class RegExpImpl {
// total regexp code compiled including code that has subsequently been freed
// and the total executable memory at any point.
static const size_t kRegExpExecutableMemoryLimit = 16 * MB;
- static const int kRegExpCompiledLimit = 1 * MB;
+ static const size_t kRegExpCompiledLimit = 1 * MB;
static const int kRegExpTooLargeToOptimize = 20 * KB;
private:
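Changing kRegExpCompiledLimit from int to size_t keeps the limit the same type as the unsigned total it is compared against in the jsregexp.cc hunk above, avoiding an implicit signed-to-unsigned conversion in the comparison. A small sketch of the pitfall such mixed comparisons invite; the values here are made up for illustration:

#include <cstddef>
#include <iostream>

int main() {
  std::size_t generated = 2 * 1024 * 1024;  // pretend 2 MB were generated

  // Mixed comparison: the signed limit is converted to size_t. Fine for a
  // positive limit, but a negative signed value would wrap to a huge number.
  int signed_limit = -1;
  std::cout << (generated > static_cast<std::size_t>(signed_limit))
            << "\n";  // prints 0: -1 became SIZE_MAX

  // Same-typed comparison, as in the patched header: no conversion at all.
  std::size_t limit = 1 * 1024 * 1024;
  std::cout << (generated > limit) << "\n";  // prints 1
  return 0;
}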
diff --git a/deps/v8/src/regexp/regexp-ast.cc b/deps/v8/src/regexp/regexp-ast.cc
index b5c2bb6d91..85babb1f74 100644
--- a/deps/v8/src/regexp/regexp-ast.cc
+++ b/deps/v8/src/regexp/regexp-ast.cc
@@ -264,6 +264,12 @@ void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
return NULL;
}
+void* RegExpUnparser::VisitGroup(RegExpGroup* that, void* data) {
+ os_ << "(?: ";
+ that->body()->Accept(this, data);
+ os_ << ")";
+ return NULL;
+}
void* RegExpUnparser::VisitLookaround(RegExpLookaround* that, void* data) {
os_ << "(";
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 07a8155437..a45d083cdb 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -21,12 +21,12 @@ namespace internal {
VISIT(Atom) \
VISIT(Quantifier) \
VISIT(Capture) \
+ VISIT(Group) \
VISIT(Lookaround) \
VISIT(BackReference) \
VISIT(Empty) \
VISIT(Text)
-
#define FORWARD_DECLARE(Name) class RegExp##Name;
FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
#undef FORWARD_DECLARE
@@ -440,6 +440,26 @@ class RegExpCapture final : public RegExpTree {
const ZoneVector<uc16>* name_;
};
+class RegExpGroup final : public RegExpTree {
+ public:
+ explicit RegExpGroup(RegExpTree* body) : body_(body) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) override {
+ return body_->ToNode(compiler, on_success);
+ }
+ RegExpGroup* AsGroup() override;
+ bool IsAnchoredAtStart() override { return body_->IsAnchoredAtStart(); }
+ bool IsAnchoredAtEnd() override { return body_->IsAnchoredAtEnd(); }
+ bool IsGroup() override;
+ int min_match() override { return body_->min_match(); }
+ int max_match() override { return body_->max_match(); }
+ Interval CaptureRegisters() override { return body_->CaptureRegisters(); }
+ RegExpTree* body() { return body_; }
+
+ private:
+ RegExpTree* body_;
+};
class RegExpLookaround final : public RegExpTree {
public:
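The new RegExpGroup node is a pure forwarding wrapper: anchoring queries, match bounds, capture registers, and ToNode all delegate to the wrapped body, so a non-capturing (?:...) group shapes the parse tree without changing matching semantics. A stripped-down sketch of the same delegation pattern with illustrative types:

#include <cassert>

// Minimal AST base with two queries, standing in for RegExpTree.
struct Node {
  virtual ~Node() = default;
  virtual int MinMatch() const = 0;
  virtual int MaxMatch() const = 0;
};

struct Literal : Node {
  explicit Literal(int len) : len_(len) {}
  int MinMatch() const override { return len_; }
  int MaxMatch() const override { return len_; }
  int len_;
};

// Like RegExpGroup: no behavior of its own, forwards everything to the body.
struct Group : Node {
  explicit Group(Node* body) : body_(body) {}
  int MinMatch() const override { return body_->MinMatch(); }
  int MaxMatch() const override { return body_->MaxMatch(); }
  Node* body_;
};

int main() {
  Literal abc(3);
  Group group(&abc);  // models (?:abc)
  assert(group.MinMatch() == 3 && group.MaxMatch() == 3);
  return 0;
}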
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 0a7f5c1b9e..2e3a8a2f76 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -134,6 +134,9 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
start_index += SlicedString::cast(subject)->offset();
subject = SlicedString::cast(subject)->parent();
}
+ if (subject->IsThinString()) {
+ subject = ThinString::cast(subject)->actual();
+ }
DCHECK(start_index >= 0);
DCHECK(start_index <= subject->length());
if (subject->IsSeqOneByteString()) {
@@ -146,6 +149,7 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
return reinterpret_cast<const byte*>(
ExternalOneByteString::cast(subject)->GetChars() + start_index);
} else {
+ DCHECK(subject->IsExternalTwoByteString());
return reinterpret_cast<const byte*>(
ExternalTwoByteString::cast(subject)->GetChars() + start_index);
}
@@ -239,6 +243,9 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
subject_ptr = slice->parent();
slice_offset = slice->offset();
}
+ if (StringShape(subject_ptr).IsThin()) {
+ subject_ptr = ThinString::cast(subject_ptr)->actual();
+ }
// Ensure that an underlying string has the same representation.
bool is_one_byte = subject_ptr->IsOneByteRepresentation();
DCHECK(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
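Both regexp-macro-assembler hunks extend the existing unwrapping chain: a sliced string is replaced by its parent with the slice offset folded into the start index, and a thin string, the forwarding wrapper left behind by in-place internalization, is replaced by its actual target before raw characters are read. A schematic of that unwrapping order with toy tagged types:

#include <cassert>

enum class Kind { kSeq, kSliced, kThin };

struct Str {
  Kind kind;
  Str* target = nullptr;  // parent for kSliced, actual for kThin
  int offset = 0;         // only meaningful for kSliced
};

// Unwrap in the same order as StringCharacterPosition: the slice first
// (accumulating its offset), then a possible thin-string forwarder.
static const Str* Unwrap(const Str* s, int* start_index) {
  if (s->kind == Kind::kSliced) {
    *start_index += s->offset;
    s = s->target;
  }
  if (s->kind == Kind::kThin) {
    s = s->target;
  }
  return s;
}

int main() {
  Str seq{Kind::kSeq};
  Str thin{Kind::kThin, &seq};
  Str sliced{Kind::kSliced, &thin, 5};

  int start = 2;
  const Str* flat = Unwrap(&sliced, &start);
  assert(flat == &seq);
  assert(start == 7);  // slice offset folded into the index
  return 0;
}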
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 3035f6a9a9..3621f7d96e 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -216,7 +216,9 @@ RegExpTree* RegExpParser::ParseDisjunction() {
RegExpCapture* capture = GetCapture(capture_index);
capture->set_body(body);
body = capture;
- } else if (group_type != GROUPING) {
+ } else if (group_type == GROUPING) {
+ body = new (zone()) RegExpGroup(body);
+ } else {
DCHECK(group_type == POSITIVE_LOOKAROUND ||
group_type == NEGATIVE_LOOKAROUND);
bool is_positive = (group_type == POSITIVE_LOOKAROUND);
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index d40431866a..570a348f74 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -145,7 +145,14 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
if (!proto->IsJSReceiver()) return false;
Handle<Map> initial_proto_initial_map = isolate->regexp_prototype_map();
- return (JSReceiver::cast(proto)->map() == *initial_proto_initial_map);
+ if (JSReceiver::cast(proto)->map() != *initial_proto_initial_map) {
+ return false;
+ }
+
+ // The Smi check is required to avoid ToLength(lastIndex) calls with possible
+ // user-code execution on the fast path.
+ Object* last_index = JSRegExp::cast(recv)->LastIndex();
+ return last_index->IsSmi() && Smi::cast(last_index)->value() >= 0;
}
int RegExpUtils::AdvanceStringIndex(Isolate* isolate, Handle<String> string,
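The strengthened IsUnmodifiedRegExp check above lets callers stay on the fast path only when lastIndex is a non-negative Smi; anything else (a heap number, or an object with a user-defined valueOf) would make the later ToLength(lastIndex) conversion observable to user code. A compact restatement of the guard with a stand-in value type:

#include <cassert>
#include <cstdint>

// Stand-in for a JS value that may or may not be a small integer (Smi).
struct Value {
  bool is_smi;
  int64_t smi_value;  // meaningful only when is_smi is true
};

// Mirrors the added guard: only a non-negative Smi lastIndex lets the
// caller skip ToLength(lastIndex), which could call back into user code.
static bool LastIndexIsFastPathSafe(const Value& last_index) {
  return last_index.is_smi && last_index.smi_value >= 0;
}

int main() {
  assert(LastIndexIsFastPathSafe({true, 0}));
  assert(!LastIndexIsFastPathSafe({true, -1}));  // negative Smi: slow path
  assert(!LastIndexIsFastPathSafe({false, 0}));  // heap value: slow path
  return 0;
}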
diff --git a/deps/v8/src/regexp/regexp-utils.h b/deps/v8/src/regexp/regexp-utils.h
index eff1ed739c..eb5f85c0bd 100644
--- a/deps/v8/src/regexp/regexp-utils.h
+++ b/deps/v8/src/regexp/regexp-utils.h
@@ -10,6 +10,8 @@
namespace v8 {
namespace internal {
+class RegExpMatchInfo;
+
// Helper methods for C++ regexp builtins.
class RegExpUtils : public AllStatic {
public:
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index aafc840680..54dc3415e8 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -6,8 +6,10 @@
#include "src/regexp/x64/regexp-macro-assembler-x64.h"
+#include "src/factory.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index c062066497..6f9f44ee2a 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -408,8 +408,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
int typeinfo, generic, total, type_percentage, generic_percentage;
GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
&generic_percentage);
- if (type_percentage >= FLAG_type_info_threshold &&
- generic_percentage <= FLAG_generic_ic_threshold) {
+ if (type_percentage >= FLAG_type_info_threshold) {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
return OptimizationReason::kHotAndStable;
@@ -431,8 +430,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
int typeinfo, generic, total, type_percentage, generic_percentage;
GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
&generic_percentage);
- if (type_percentage >= FLAG_type_info_threshold &&
- generic_percentage <= FLAG_generic_ic_threshold) {
+ if (type_percentage >= FLAG_type_info_threshold) {
return OptimizationReason::kSmallFunction;
}
}
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index a9cbc208b3..07c6ad0116 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -37,7 +37,7 @@ static void InstallCode(
BuiltinFunctionId id = static_cast<BuiltinFunctionId>(-1)) {
Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
Handle<JSFunction> optimized =
- isolate->factory()->NewFunctionWithoutPrototype(key, code);
+ isolate->factory()->NewFunctionWithoutPrototype(key, code, true);
if (argc < 0) {
optimized->shared()->DontAdaptArguments();
} else {
@@ -46,6 +46,8 @@ static void InstallCode(
if (id >= 0) {
optimized->shared()->set_builtin_function_id(id);
}
+ optimized->shared()->set_language_mode(STRICT);
+ optimized->shared()->set_native(true);
JSObject::AddProperty(holder, key, optimized, NONE);
}
@@ -78,11 +80,9 @@ RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
kArrayValues);
InstallBuiltin(isolate, holder, "entries", Builtins::kArrayPrototypeEntries,
0, kArrayEntries);
-
return *holder;
}
-
RUNTIME_FUNCTION(Runtime_FixedArrayGet) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
@@ -475,23 +475,32 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
// Let n be ? ToInteger(fromIndex). (If fromIndex is undefined, this step
// produces the value 0.)
- int64_t start_from;
- {
+ int64_t index = 0;
+ if (!from_index->IsUndefined(isolate)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, from_index,
Object::ToInteger(isolate, from_index));
- double fp = from_index->Number();
- if (fp > len) return isolate->heap()->false_value();
- start_from = static_cast<int64_t>(fp);
- }
- int64_t index;
- if (start_from >= 0) {
- index = start_from;
- } else {
- index = len + start_from;
- if (index < 0) {
- index = 0;
+ if (V8_LIKELY(from_index->IsSmi())) {
+ int start_from = Smi::cast(*from_index)->value();
+ if (start_from < 0) {
+ index = std::max<int64_t>(len + start_from, 0);
+ } else {
+ index = start_from;
+ }
+ } else {
+ DCHECK(from_index->IsHeapNumber());
+ double start_from = from_index->Number();
+ if (start_from >= len) return isolate->heap()->false_value();
+ if (V8_LIKELY(std::isfinite(start_from))) {
+ if (start_from < 0) {
+ index = static_cast<int64_t>(std::max<double>(start_from + len, 0));
+ } else {
+ index = start_from;
+ }
+ }
}
+
+ DCHECK_GE(index, 0);
}
// If the receiver is not a special receiver type, and the length is a valid
@@ -646,5 +655,33 @@ RUNTIME_FUNCTION(Runtime_SpreadIterablePrepare) {
return *spread;
}
+RUNTIME_FUNCTION(Runtime_SpreadIterableFixed) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, spread, 0);
+
+ // The caller should check if proper iteration is necessary.
+ Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, spread,
+ Execution::Call(isolate, spread_iterable_function,
+ isolate->factory()->undefined_value(), 1, &spread));
+
+ // Create a new FixedArray and put the result of the spread into it.
+ Handle<JSArray> spread_array = Handle<JSArray>::cast(spread);
+ uint32_t spread_length;
+ CHECK(spread_array->length()->ToArrayIndex(&spread_length));
+
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(spread_length);
+ ElementsAccessor* accessor = spread_array->GetElementsAccessor();
+ for (uint32_t i = 0; i < spread_length; i++) {
+ DCHECK(accessor->HasElement(spread_array, i));
+ Handle<Object> element = accessor->Get(spread_array, i);
+ result->set(i, *element);
+ }
+
+ return *result;
+}
+
} // namespace internal
} // namespace v8
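The reworked fromIndex handling in Runtime_ArrayIncludes_Slow splits three cases: an undefined fromIndex starts the search at 0, a negative Smi is clamped with max(len + n, 0), and a heap number is rejected outright when it is at least len and treated as 0 when non-finite (only -Infinity can reach that branch). The sketch below merges the Smi and heap-number branches, which compute the same result, and spot-checks the clamping:

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>

// Returns the start index, or -1 to signal "definitely not found"
// (the runtime function returns false_value() in that case).
static int64_t StartIndex(double from, int64_t len) {
  if (from >= static_cast<double>(len)) return -1;  // past the end
  if (!std::isfinite(from)) return 0;               // only -Infinity remains
  if (from < 0) {
    return std::max<int64_t>(static_cast<int64_t>(from) + len, 0);
  }
  return static_cast<int64_t>(from);
}

int main() {
  assert(StartIndex(0, 10) == 0);
  assert(StartIndex(3, 10) == 3);
  assert(StartIndex(-4, 10) == 6);   // len + from
  assert(StartIndex(-42, 10) == 0);  // clamped to 0
  assert(StartIndex(10, 10) == -1);  // from >= len: cannot match
  assert(StartIndex(-INFINITY, 10) == 0);
  return 0;
}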
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 246079232b..9398586de5 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -459,48 +459,5 @@ RUNTIME_FUNCTION(Runtime_GetSuperConstructor) {
return prototype;
}
-RUNTIME_FUNCTION(Runtime_NewWithSpread) {
- HandleScope scope(isolate);
- DCHECK_LE(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, constructor, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, new_target, 1);
-
- int constructor_argc = args.length() - 2;
- CONVERT_ARG_HANDLE_CHECKED(Object, spread, args.length() - 1);
-
- // Iterate over the spread if we need to.
- if (spread->IterationHasObservableEffects()) {
- Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, spread,
- Execution::Call(isolate, spread_iterable_function,
- isolate->factory()->undefined_value(), 1, &spread));
- }
-
- uint32_t spread_length;
- Handle<JSArray> spread_array = Handle<JSArray>::cast(spread);
- CHECK(spread_array->length()->ToArrayIndex(&spread_length));
- int result_length = constructor_argc - 1 + spread_length;
- ScopedVector<Handle<Object>> construct_args(result_length);
-
- // Append each of the individual args to the result.
- for (int i = 0; i < constructor_argc - 1; i++) {
- construct_args[i] = args.at<Object>(2 + i);
- }
-
- // Append element of the spread to the result.
- ElementsAccessor* accessor = spread_array->GetElementsAccessor();
- for (uint32_t i = 0; i < spread_length; i++) {
- DCHECK(accessor->HasElement(spread_array, i));
- Handle<Object> element = accessor->Get(spread_array, i);
- construct_args[constructor_argc - 1 + i] = element;
- }
-
- // Call the constructor.
- RETURN_RESULT_OR_FAILURE(
- isolate, Execution::New(isolate, constructor, new_target, result_length,
- construct_args.start()));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 15c1fab76f..214ce1c4e6 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -233,32 +233,7 @@ RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
CONVERT_NUMBER_CHECKED(int, max_entries, Int32, args[1]);
CHECK(max_entries >= 0);
-
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- if (max_entries == 0 || max_entries > table->NumberOfElements()) {
- max_entries = table->NumberOfElements();
- }
- Handle<FixedArray> entries =
- isolate->factory()->NewFixedArray(max_entries * 2);
- // Allocation can cause GC can delete weak elements. Reload.
- if (max_entries > table->NumberOfElements()) {
- max_entries = table->NumberOfElements();
- }
-
- {
- DisallowHeapAllocation no_gc;
- int count = 0;
- for (int i = 0; count / 2 < max_entries && i < table->Capacity(); i++) {
- Handle<Object> key(table->KeyAt(i), isolate);
- if (table->IsKey(isolate, *key)) {
- entries->set(count++, *key);
- Object* value = table->Lookup(key);
- entries->set(count++, value);
- }
- }
- DCHECK_EQ(max_entries * 2, count);
- }
- return *isolate->factory()->NewJSArrayWithElements(entries);
+ return *JSWeakCollection::GetEntries(holder, max_entries);
}
@@ -348,26 +323,7 @@ RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
CONVERT_NUMBER_CHECKED(int, max_values, Int32, args[1]);
CHECK(max_values >= 0);
-
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- if (max_values == 0 || max_values > table->NumberOfElements()) {
- max_values = table->NumberOfElements();
- }
- Handle<FixedArray> values = isolate->factory()->NewFixedArray(max_values);
- // Recompute max_values because GC could have removed elements from the table.
- if (max_values > table->NumberOfElements()) {
- max_values = table->NumberOfElements();
- }
- {
- DisallowHeapAllocation no_gc;
- int count = 0;
- for (int i = 0; count < max_values && i < table->Capacity(); i++) {
- Object* key = table->KeyAt(i);
- if (table->IsKey(isolate, key)) values->set(count++, key);
- }
- DCHECK_EQ(max_values, count);
- }
- return *isolate->factory()->NewJSArrayWithElements(values);
+ return *JSWeakCollection::GetEntries(holder, max_values);
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index f1c76bb2ac..f929d73697 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -182,6 +182,10 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
JavaScriptFrameIterator top_it(isolate);
JavaScriptFrame* top_frame = top_it.frame();
isolate->set_context(Context::cast(top_frame->context()));
+ } else {
+ // TODO(turbofan): We currently need the native context to materialize
+ // the arguments object, but only to get to its map.
+ isolate->set_context(function->native_context());
}
// Make sure to materialize objects before causing any allocation.
@@ -442,9 +446,10 @@ static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
Handle<JSFunction> compiled;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, compiled, Compiler::GetFunctionFromEval(
- source, outer_info, context, language_mode,
- restriction, eval_scope_position, eval_position),
+ isolate, compiled,
+ Compiler::GetFunctionFromEval(source, outer_info, context, language_mode,
+ restriction, kNoSourcePosition,
+ eval_scope_position, eval_position),
isolate->heap()->exception());
return *compiled;
}
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index d24a450cbf..3649621b09 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -6,6 +6,7 @@
#include "src/arguments.h"
#include "src/compiler.h"
+#include "src/debug/debug-coverage.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
@@ -27,29 +28,28 @@ RUNTIME_FUNCTION(Runtime_DebugBreak) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- isolate->debug()->set_return_value(value);
+ HandleScope scope(isolate);
+ ReturnValueScope result_scope(isolate->debug());
+ isolate->debug()->set_return_value(*value);
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it(isolate);
isolate->debug()->Break(it.frame());
-
- isolate->debug()->SetAfterBreakTarget(it.frame());
- return *isolate->debug()->return_value();
+ return isolate->debug()->return_value();
}
RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- isolate->debug()->set_return_value(value);
+ HandleScope scope(isolate);
+ ReturnValueScope result_scope(isolate->debug());
+ isolate->debug()->set_return_value(*value);
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it(isolate);
isolate->debug()->Break(it.frame());
- // If live-edit has dropped frames, we are not going back to dispatch.
- if (LiveEdit::SetAfterBreakTarget(isolate->debug())) return Smi::kZero;
-
// Return the handler from the original bytecode array.
DCHECK(it.frame()->is_interpreted());
InterpretedFrame* interpreted_frame =
@@ -84,8 +84,13 @@ RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
CHECK(args[0]->IsJSFunction() || args[0]->IsNullOrUndefined(isolate));
CONVERT_ARG_HANDLE_CHECKED(Object, callback, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, data, 1);
- isolate->debug()->SetEventListener(callback, data);
-
+ if (callback->IsJSFunction()) {
+ JavaScriptDebugDelegate* delegate = new JavaScriptDebugDelegate(
+ isolate, Handle<JSFunction>::cast(callback), data);
+ isolate->debug()->SetDebugDelegate(delegate, true);
+ } else {
+ isolate->debug()->SetDebugDelegate(nullptr, false);
+ }
return isolate->heap()->undefined_value();
}
@@ -632,7 +637,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// to the frame information.
Handle<Object> return_value = isolate->factory()->undefined_value();
if (at_return) {
- return_value = isolate->debug()->return_value();
+ return_value = handle(isolate->debug()->return_value(), isolate);
}
// Now advance to the arguments adapter frame (if any). It contains all
@@ -731,9 +736,12 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
}
// Add the receiver (same as in function frame).
- Handle<Object> receiver(it.frame()->receiver(), isolate);
+ Handle<Object> receiver = frame_inspector.summary().receiver();
DCHECK(function->shared()->IsUserJavaScript());
- DCHECK_IMPLIES(is_sloppy(shared->language_mode()), receiver->IsJSReceiver());
+ // Optimized frames only restore the receiver as best-effort (see
+ // OptimizedFrame::Summarize).
+ DCHECK_IMPLIES(!is_optimized && is_sloppy(shared->language_mode()),
+ receiver->IsJSReceiver());
details->set(kFrameDetailsReceiverIndex, *receiver);
DCHECK_EQ(details_size, details_index);
@@ -751,8 +759,10 @@ RUNTIME_FUNCTION(Runtime_GetScopeCount) {
// Get the frame where the debugging is performed.
StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator it(isolate, id);
- JavaScriptFrame* frame = it.frame();
+ StackTraceFrameIterator it(isolate, id);
+ StandardFrame* frame = it.frame();
+ if (it.frame()->is_wasm()) return 0;
+
FrameInspector frame_inspector(frame, 0, isolate);
// Count the visible scopes.
@@ -786,8 +796,9 @@ RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
// Get the frame where the debugging is performed.
StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator frame_it(isolate, id);
- JavaScriptFrame* frame = frame_it.frame();
+ StackTraceFrameIterator frame_it(isolate, id);
+ // Wasm has no scopes; this must be JavaScript.
+ JavaScriptFrame* frame = JavaScriptFrame::cast(frame_it.frame());
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
// Find the requested scope.
@@ -975,8 +986,9 @@ RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
// Get the frame where the debugging is performed.
StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator frame_it(isolate, id);
- JavaScriptFrame* frame = frame_it.frame();
+ StackTraceFrameIterator frame_it(isolate, id);
+ // Wasm has no scopes; this must be JavaScript.
+ JavaScriptFrame* frame = JavaScriptFrame::cast(frame_it.frame());
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
ScopeIterator it(isolate, &frame_inspector);
@@ -1178,7 +1190,7 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
// Get the step action and check validity.
StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
if (step_action != StepIn && step_action != StepNext &&
- step_action != StepOut && step_action != StepFrame) {
+ step_action != StepOut) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
@@ -1190,19 +1202,6 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_PrepareStepFrame) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- CHECK(isolate->debug()->CheckExecutionState());
-
- // Clear all current stepping setup.
- isolate->debug()->ClearStepping();
-
- // Prepare step.
- isolate->debug()->PrepareStep(StepFrame);
- return isolate->heap()->undefined_value();
-}
-
// Clear all stepping set by PrepareStep.
RUNTIME_FUNCTION(Runtime_ClearStepping) {
HandleScope scope(isolate);
@@ -1218,19 +1217,20 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
// Check the execution state and decode arguments frame and source to be
// evaluated.
- DCHECK_EQ(4, args.length());
+ DCHECK_EQ(5, args.length());
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
+ CONVERT_BOOLEAN_ARG_CHECKED(throw_on_side_effect, 4);
StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
RETURN_RESULT_OR_FAILURE(
- isolate,
- DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source));
+ isolate, DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source,
+ throw_on_side_effect));
}
@@ -1253,10 +1253,6 @@ RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- // This runtime function is used by the debugger to determine whether the
- // debugger is active or not. Hence we fail gracefully here and don't crash.
- if (!isolate->debug()->is_active()) return isolate->ThrowIllegalOperation();
-
Handle<FixedArray> instances;
{
DebugScope debug_scope(isolate->debug());
@@ -1452,26 +1448,6 @@ RUNTIME_FUNCTION(Runtime_FunctionGetDebugName) {
}
-// Calls specified function with or without entering the debugger.
-// This is used in unit tests to run code as if debugger is entered or simply
-// to have a stack with C++ frame in the middle.
-RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
- DebugScope debug_scope(isolate->debug());
- if (debug_scope.failed()) {
- DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
- }
-
- RETURN_RESULT_OR_FAILURE(
- isolate, Execution::Call(isolate, function,
- handle(function->global_proxy()), 0, NULL));
-}
-
-
RUNTIME_FUNCTION(Runtime_GetDebugContext) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -1662,7 +1638,8 @@ namespace {
int ScriptLinePositionWithOffset(Handle<Script> script, int line, int offset) {
if (line < 0 || offset < 0) return -1;
- if (line == 0) return ScriptLinePosition(script, line) + offset;
+ if (line == 0 || offset == 0)
+ return ScriptLinePosition(script, line) + offset;
Script::PositionInfo info;
if (!Script::GetPositionInfo(script, offset, &info, Script::NO_OFFSET)) {
@@ -1874,13 +1851,6 @@ RUNTIME_FUNCTION(Runtime_DebugPopPromise) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DebugNextAsyncTaskId) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- return Smi::FromInt(isolate->debug()->NextAsyncTaskId(promise));
-}
-
RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionPromiseCreated) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
@@ -1892,7 +1862,7 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionPromiseCreated) {
JSObject::SetProperty(promise, async_stack_id_symbol,
handle(Smi::FromInt(id), isolate), STRICT)
.Assert();
- isolate->debug()->OnAsyncTaskEvent(debug::kDebugEnqueueAsyncFunction, id);
+ isolate->debug()->OnAsyncTaskEvent(debug::kDebugEnqueueAsyncFunction, id, 0);
return isolate->heap()->undefined_value();
}
@@ -1915,7 +1885,7 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncEventEnqueueRecurring) {
isolate->debug()->OnAsyncTaskEvent(
status == v8::Promise::kFulfilled ? debug::kDebugEnqueuePromiseResolve
: debug::kDebugEnqueuePromiseReject,
- isolate->debug()->NextAsyncTaskId(promise));
+ isolate->debug()->NextAsyncTaskId(promise), 0);
}
return isolate->heap()->undefined_value();
}
@@ -1925,11 +1895,59 @@ RUNTIME_FUNCTION(Runtime_DebugIsActive) {
return Smi::FromInt(isolate->debug()->is_active());
}
-
RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
UNIMPLEMENTED();
return NULL;
}
+RUNTIME_FUNCTION(Runtime_DebugCollectCoverage) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ // Collect coverage data.
+ std::unique_ptr<Coverage> coverage(Coverage::Collect(isolate, false));
+ Factory* factory = isolate->factory();
+ // Turn the returned data structure into JavaScript.
+ // Create an array of scripts.
+ int num_scripts = static_cast<int>(coverage->size());
+ // Prepare property keys.
+ Handle<FixedArray> scripts_array = factory->NewFixedArray(num_scripts);
+ Handle<String> script_string = factory->NewStringFromStaticChars("script");
+ Handle<String> start_string = factory->NewStringFromStaticChars("start");
+ Handle<String> end_string = factory->NewStringFromStaticChars("end");
+ Handle<String> count_string = factory->NewStringFromStaticChars("count");
+ for (int i = 0; i < num_scripts; i++) {
+ const auto& script_data = coverage->at(i);
+ HandleScope inner_scope(isolate);
+ int num_functions = static_cast<int>(script_data.functions.size());
+ Handle<FixedArray> functions_array = factory->NewFixedArray(num_functions);
+ for (int j = 0; j < num_functions; j++) {
+ const auto& function_data = script_data.functions[j];
+ Handle<JSObject> range_obj = factory->NewJSObjectWithNullProto();
+ JSObject::AddProperty(range_obj, start_string,
+ factory->NewNumberFromInt(function_data.start),
+ NONE);
+ JSObject::AddProperty(range_obj, end_string,
+ factory->NewNumberFromInt(function_data.end), NONE);
+ JSObject::AddProperty(range_obj, count_string,
+ factory->NewNumberFromUint(function_data.count),
+ NONE);
+ functions_array->set(j, *range_obj);
+ }
+ Handle<JSArray> script_obj =
+ factory->NewJSArrayWithElements(functions_array, FAST_ELEMENTS);
+ Handle<JSObject> wrapper = Script::GetWrapper(script_data.script);
+ JSObject::AddProperty(script_obj, script_string, wrapper, NONE);
+ scripts_array->set(i, *script_obj);
+ }
+ return *factory->NewJSArrayWithElements(scripts_array, FAST_ELEMENTS);
+}
+
+RUNTIME_FUNCTION(Runtime_DebugTogglePreciseCoverage) {
+ SealHandleScope shs(isolate);
+ CONVERT_BOOLEAN_ARG_CHECKED(enable, 0);
+ Coverage::TogglePrecise(isolate, enable);
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
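Runtime_DebugCollectCoverage lowers the collected coverage into plain JavaScript data: one array per script whose elements are {start, end, count} range objects, with the script wrapper attached to that same array under a "script" property. A hedged sketch of the underlying data model as plain structs; the field and type names here are chosen for illustration:

#include <cassert>
#include <string>
#include <vector>

// One invocation-counted source range, as in the {start, end, count}
// objects built above.
struct FunctionCoverage {
  int start;       // start offset in the script source
  int end;         // end offset in the script source
  unsigned count;  // how often the function was invoked
};

// One script's worth of coverage: the ranges plus a script identifier
// (the runtime function attaches the script wrapper object instead).
struct ScriptCoverage {
  std::string script_name;
  std::vector<FunctionCoverage> functions;
};

int main() {
  ScriptCoverage sc{"example.js", {{0, 120, 1}, {20, 60, 5}}};
  assert(sc.functions.size() == 2);
  assert(sc.functions[1].count == 5);  // inner function ran five times
  return 0;
}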
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index bd37cdcf2f..9a7c539865 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -160,22 +160,5 @@ RUNTIME_FUNCTION(Runtime_ForInFilter) {
HasEnumerableProperty(isolate, receiver, key));
}
-
-RUNTIME_FUNCTION(Runtime_ForInNext) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, cache_array, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, cache_type, 2);
- CONVERT_SMI_ARG_CHECKED(index, 3);
- Handle<Object> key = handle(cache_array->get(index), isolate);
- // Don't need filtering if expected map still matches that of the receiver.
- if (receiver->map() == *cache_type) {
- return *key;
- }
- RETURN_RESULT_OR_FAILURE(isolate,
- HasEnumerableProperty(isolate, receiver, key));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 31da4a4535..ac8a430761 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -190,7 +190,6 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_outer_scope_info(source_shared->outer_scope_info());
target_shared->set_length(source_shared->length());
- target_shared->set_num_literals(source_shared->num_literals());
target_shared->set_feedback_metadata(source_shared->feedback_metadata());
target_shared->set_internal_formal_parameter_count(
source_shared->internal_formal_parameter_count());
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index 4af0831acf..b6582ffc7f 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -29,6 +29,11 @@ RUNTIME_FUNCTION(Runtime_AtomicsWait) {
CHECK_EQ(sta->type(), kExternalInt32Array);
CHECK(timeout == V8_INFINITY || !std::isnan(timeout));
+ if (!isolate->allow_atomics_wait()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kAtomicsWaitNotAllowed));
+ }
+
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
size_t addr = (index << 2) + NumberToSize(sta->byte_offset());
@@ -40,7 +45,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsWake) {
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_INT32_ARG_CHECKED(count, 2);
+ CONVERT_UINT32_ARG_CHECKED(count, 2);
CHECK(sta->GetBuffer()->is_shared());
CHECK_LT(index, NumberToSize(sta->length()));
CHECK_EQ(sta->type(), kExternalInt32Array);
@@ -65,5 +70,14 @@ RUNTIME_FUNCTION(Runtime_AtomicsNumWaitersForTesting) {
return FutexEmulation::NumWaitersForTesting(isolate, array_buffer, addr);
}
+
+RUNTIME_FUNCTION(Runtime_SetAllowAtomicsWait) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_BOOLEAN_ARG_CHECKED(set, 0);
+
+ isolate->set_allow_atomics_wait(set);
+ return isolate->heap()->undefined_value();
+}
} // namespace internal
} // namespace v8
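Runtime_SetAllowAtomicsWait gives the embedder a per-isolate switch for blocking waits, and Runtime_AtomicsWait now throws a TypeError before touching the futex machinery when the switch is off, which matters on threads that must never block. A sketch of the same gate pattern with stand-in types rather than the V8 API:

#include <cassert>
#include <stdexcept>

// Per-isolate switch mirroring set_allow_atomics_wait / allow_atomics_wait.
class IsolateFlags {
 public:
  void set_allow_atomics_wait(bool allowed) { allow_atomics_wait_ = allowed; }
  bool allow_atomics_wait() const { return allow_atomics_wait_; }

 private:
  bool allow_atomics_wait_ = true;
};

// The wait entry point checks the gate before doing any blocking work.
static void AtomicsWait(const IsolateFlags& isolate) {
  if (!isolate.allow_atomics_wait()) {
    throw std::runtime_error("Atomics.wait is not allowed on this thread");
  }
  // ... blocking futex wait would happen here ...
}

int main() {
  IsolateFlags isolate;
  AtomicsWait(isolate);  // allowed by default
  isolate.set_allow_atomics_wait(false);
  try {
    AtomicsWait(isolate);
    assert(false);  // unreachable: the gate must throw
  } catch (const std::runtime_error&) {
  }
  return 0;
}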
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index 6630fadc10..e89175a37d 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -43,6 +43,7 @@
#include "unicode/uloc.h"
#include "unicode/unistr.h"
#include "unicode/unum.h"
+#include "unicode/ustring.h"
#include "unicode/uversion.h"
@@ -609,10 +610,11 @@ RUNTIME_FUNCTION(Runtime_InternalCompare) {
String::FlatContent flat2 = string2->GetFlatContent();
std::unique_ptr<uc16[]> sap1;
std::unique_ptr<uc16[]> sap2;
- const UChar* string_val1 = GetUCharBufferFromFlat(flat1, &sap1, length1);
- const UChar* string_val2 = GetUCharBufferFromFlat(flat2, &sap2, length2);
- result =
- collator->compare(string_val1, length1, string_val2, length2, status);
+ icu::UnicodeString string_val1(
+ FALSE, GetUCharBufferFromFlat(flat1, &sap1, length1), length1);
+ icu::UnicodeString string_val2(
+ FALSE, GetUCharBufferFromFlat(flat2, &sap2, length2), length2);
+ result = collator->compare(string_val1, string_val2, status);
}
if (U_FAILURE(status)) return isolate->ThrowIllegalOperation();
@@ -831,6 +833,8 @@ MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
Handle<SeqTwoByteString> result;
std::unique_ptr<uc16[]> sap;
+ if (dest_length == 0) return isolate->heap()->empty_string();
+
// This is not a real loop. It'll be executed only once (no overflow) or
// twice (overflow).
for (int i = 0; i < 2; ++i) {
@@ -1041,7 +1045,7 @@ MUST_USE_RESULT Object* ConvertToLower(Handle<String> s, Isolate* isolate) {
MUST_USE_RESULT Object* ConvertToUpper(Handle<String> s, Isolate* isolate) {
int32_t length = s->length();
- if (s->HasOnlyOneByteChars()) {
+ if (s->HasOnlyOneByteChars() && length > 0) {
Handle<SeqOneByteString> result =
isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 6ff0a09b61..83995098c0 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -43,19 +43,6 @@ RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
}
-RUNTIME_FUNCTION(Runtime_ExportExperimentalFromRuntime) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
- CHECK(isolate->bootstrapper()->IsActive());
- JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
- "ExportExperimentalFromRuntime");
- Bootstrapper::ExportExperimentalFromRuntime(isolate, container);
- JSObject::MigrateSlowToFast(container, 0, "ExportExperimentalFromRuntime");
- return *container;
-}
-
-
RUNTIME_FUNCTION(Runtime_InstallToContext) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -101,6 +88,13 @@ RUNTIME_FUNCTION(Runtime_ThrowStackOverflow) {
return isolate->StackOverflow();
}
+RUNTIME_FUNCTION(Runtime_ThrowSymbolAsyncIteratorInvalid) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kSymbolAsyncIteratorInvalid));
+}
+
RUNTIME_FUNCTION(Runtime_ThrowTypeError) {
HandleScope scope(isolate);
DCHECK_LE(1, args.length());
@@ -225,6 +219,26 @@ RUNTIME_FUNCTION(Runtime_ThrowSymbolIteratorInvalid) {
isolate, NewTypeError(MessageTemplate::kSymbolIteratorInvalid));
}
+RUNTIME_FUNCTION(Runtime_ThrowNonCallableInInstanceOfCheck) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNonCallableInInstanceOfCheck));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowNonObjectInInstanceOfCheck) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNonObjectInInstanceOfCheck));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowNotConstructor) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotConstructor, object));
+}
+
RUNTIME_FUNCTION(Runtime_ThrowNotGeneric) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -299,6 +313,7 @@ RUNTIME_FUNCTION(Runtime_AllocateSeqOneByteString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(length, 0);
+ if (length == 0) return isolate->heap()->empty_string();
Handle<SeqOneByteString> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, isolate->factory()->NewRawOneByteString(length));
@@ -309,6 +324,7 @@ RUNTIME_FUNCTION(Runtime_AllocateSeqTwoByteString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(length, 0);
+ if (length == 0) return isolate->heap()->empty_string();
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, isolate->factory()->NewRawTwoByteString(length));
@@ -350,8 +366,7 @@ bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
MessageLocation location;
if (ComputeLocation(isolate, &location)) {
- Zone zone(isolate->allocator(), ZONE_NAME);
- std::unique_ptr<ParseInfo> info(new ParseInfo(&zone, location.shared()));
+ std::unique_ptr<ParseInfo> info(new ParseInfo(location.shared()));
if (parsing::ParseAny(info.get())) {
CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
Handle<String> str = printer.Print(info->literal(), location.start_pos());
@@ -365,7 +380,6 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
} // namespace
-
RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -496,5 +510,20 @@ RUNTIME_FUNCTION(Runtime_AllowDynamicFunction) {
Builtins::AllowDynamicFunction(isolate, target, global_proxy));
}
+RUNTIME_FUNCTION(Runtime_CreateAsyncFromSyncIterator) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, sync_iterator, 0);
+
+ if (!sync_iterator->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kSymbolIteratorInvalid));
+ }
+
+ return *isolate->factory()->NewJSAsyncFromSyncIterator(
+ Handle<JSReceiver>::cast(sync_iterator));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 2201b4c337..9f3897bf64 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -23,10 +23,15 @@ RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+ CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
+ CONVERT_SMI_ARG_CHECKED(index, 2);
CONVERT_SMI_ARG_CHECKED(pretenured_flag, 3);
Handle<Context> context(isolate->context(), isolate);
+ FeedbackSlot slot = FeedbackVector::ToSlot(index);
+ Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, static_cast<PretenureFlag>(pretenured_flag));
+ shared, context, vector_cell,
+ static_cast<PretenureFlag>(pretenured_flag));
}
namespace {
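Runtime_InterpreterNewClosure now receives the FeedbackVector and a slot index, converts the index with FeedbackVector::ToSlot, and fetches the literals Cell from that slot before creating the closure. A toy version of the slot-to-cell lookup; the container below is a plain array stand-in, not the real FeedbackVector layout:

#include <cassert>
#include <utility>
#include <vector>

// Toy stand-ins: a slot is just a typed index into the vector.
struct FeedbackSlot {
  int id;
};

class ToyFeedbackVector {
 public:
  explicit ToyFeedbackVector(std::vector<int> cells)
      : cells_(std::move(cells)) {}
  static FeedbackSlot ToSlot(int index) { return FeedbackSlot{index}; }
  int Get(FeedbackSlot slot) const { return cells_[slot.id]; }

 private:
  std::vector<int> cells_;  // each entry stands in for a literals Cell
};

int main() {
  ToyFeedbackVector vector({100, 200, 300});
  FeedbackSlot slot = ToyFeedbackVector::ToSlot(2);
  assert(vector.Get(slot) == 300);  // the cell handed to the new closure
  return 0;
}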
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 45b83293b6..7beadf5e0b 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -15,31 +15,23 @@ namespace v8 {
namespace internal {
static Handle<Map> ComputeObjectLiteralMap(
- Handle<Context> context, Handle<FixedArray> constant_properties,
+ Handle<Context> context,
+ Handle<BoilerplateDescription> boilerplate_description,
bool* is_result_from_cache) {
- int properties_length = constant_properties->length();
- int number_of_properties = properties_length / 2;
-
- for (int p = 0; p != properties_length; p += 2) {
- Object* key = constant_properties->get(p);
- uint32_t element_index = 0;
- if (key->ToArrayIndex(&element_index)) {
- // An index key does not require space in the property backing store.
- number_of_properties--;
- }
- }
+ int number_of_properties = boilerplate_description->backing_store_size();
Isolate* isolate = context->GetIsolate();
return isolate->factory()->ObjectLiteralMapFromCache(
context, number_of_properties, is_result_from_cache);
}
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
- Isolate* isolate, Handle<LiteralsArray> literals,
- Handle<FixedArray> constant_properties);
+ Isolate* isolate, Handle<FeedbackVector> vector,
+ Handle<BoilerplateDescription> boilerplate_description);
MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
- Isolate* isolate, Handle<LiteralsArray> literals,
- Handle<FixedArray> constant_properties, bool should_have_fast_elements) {
+ Isolate* isolate, Handle<FeedbackVector> vector,
+ Handle<BoilerplateDescription> boilerplate_description,
+ bool should_have_fast_elements) {
Handle<Context> context = isolate->native_context();
// In case we have function literals, we want the object to be in
@@ -47,11 +39,11 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
// maps with constant functions can't be shared if the functions are
// not the same (which is the common case).
bool is_result_from_cache = false;
- Handle<Map> map = ComputeObjectLiteralMap(context, constant_properties,
+ Handle<Map> map = ComputeObjectLiteralMap(context, boilerplate_description,
&is_result_from_cache);
PretenureFlag pretenure_flag =
- isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
+ isolate->heap()->InNewSpace(*vector) ? NOT_TENURED : TENURED;
Handle<JSObject> boilerplate =
isolate->factory()->NewJSObjectFromMap(map, pretenure_flag);
@@ -60,26 +52,27 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
// Add the constant properties to the boilerplate.
- int length = constant_properties->length();
+ int length = boilerplate_description->size();
bool should_transform =
!is_result_from_cache && boilerplate->HasFastProperties();
bool should_normalize = should_transform;
if (should_normalize) {
// TODO(verwaest): We might not want to ever normalize here.
- JSObject::NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES,
- length / 2, "Boilerplate");
+ JSObject::NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES, length,
+ "Boilerplate");
}
// TODO(verwaest): Support tracking representations in the boilerplate.
- for (int index = 0; index < length; index += 2) {
- Handle<Object> key(constant_properties->get(index + 0), isolate);
- Handle<Object> value(constant_properties->get(index + 1), isolate);
- if (value->IsFixedArray()) {
- // The value contains the constant_properties of a
+ for (int index = 0; index < length; index++) {
+ Handle<Object> key(boilerplate_description->name(index), isolate);
+ Handle<Object> value(boilerplate_description->value(index), isolate);
+ if (value->IsBoilerplateDescription()) {
+ // The value contains the boilerplate properties of a
// simple object or array literal.
- Handle<FixedArray> array = Handle<FixedArray>::cast(value);
+ Handle<BoilerplateDescription> boilerplate =
+ Handle<BoilerplateDescription>::cast(value);
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value, CreateLiteralBoilerplate(isolate, literals, array),
- Object);
+ isolate, value,
+ CreateLiteralBoilerplate(isolate, vector, boilerplate), Object);
}
MaybeHandle<Object> maybe_result;
uint32_t element_index = 0;
@@ -112,13 +105,13 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
}
static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
- Isolate* isolate, Handle<LiteralsArray> literals,
+ Isolate* isolate, Handle<FeedbackVector> vector,
Handle<ConstantElementsPair> elements) {
// Create the JSArray.
Handle<JSFunction> constructor = isolate->array_function();
PretenureFlag pretenure_flag =
- isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
+ isolate->heap()->InNewSpace(*vector) ? NOT_TENURED : TENURED;
Handle<JSArray> object = Handle<JSArray>::cast(
isolate->factory()->NewJSObject(constructor, pretenure_flag));
@@ -161,15 +154,16 @@ static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
copied_elements_values = fixed_array_values_copy;
FOR_WITH_HANDLE_SCOPE(
isolate, int, i = 0, i, i < fixed_array_values->length(), i++, {
- if (fixed_array_values->get(i)->IsFixedArray()) {
- // The value contains the constant_properties of a
+ if (fixed_array_values->get(i)->IsBoilerplateDescription()) {
+ // The value contains the boilerplate properties of a
// simple object or array literal.
- Handle<FixedArray> fa(
- FixedArray::cast(fixed_array_values->get(i)));
+ Handle<BoilerplateDescription> boilerplate(
+ BoilerplateDescription::cast(fixed_array_values->get(i)));
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
- CreateLiteralBoilerplate(isolate, literals, fa), Object);
+ CreateLiteralBoilerplate(isolate, vector, boilerplate),
+ Object);
fixed_array_values_copy->set(i, *result);
}
});
@@ -183,22 +177,24 @@ static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
}
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
- Isolate* isolate, Handle<LiteralsArray> literals,
- Handle<FixedArray> array) {
+ Isolate* isolate, Handle<FeedbackVector> vector,
+ Handle<BoilerplateDescription> array) {
Handle<HeapObject> elements = CompileTimeValue::GetElements(array);
switch (CompileTimeValue::GetLiteralType(array)) {
case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS: {
- Handle<FixedArray> props = Handle<FixedArray>::cast(elements);
- return CreateObjectLiteralBoilerplate(isolate, literals, props, true);
+ Handle<BoilerplateDescription> props =
+ Handle<BoilerplateDescription>::cast(elements);
+ return CreateObjectLiteralBoilerplate(isolate, vector, props, true);
}
case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS: {
- Handle<FixedArray> props = Handle<FixedArray>::cast(elements);
- return CreateObjectLiteralBoilerplate(isolate, literals, props, false);
+ Handle<BoilerplateDescription> props =
+ Handle<BoilerplateDescription>::cast(elements);
+ return CreateObjectLiteralBoilerplate(isolate, vector, props, false);
}
case CompileTimeValue::ARRAY_LITERAL: {
Handle<ConstantElementsPair> elems =
Handle<ConstantElementsPair>::cast(elements);
- return CreateArrayLiteralBoilerplate(isolate, literals, elems);
+ return CreateArrayLiteralBoilerplate(isolate, vector, elems);
}
default:
UNREACHABLE();
@@ -214,13 +210,15 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
CONVERT_SMI_ARG_CHECKED(index, 1);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
+ FeedbackSlot literal_slot(FeedbackVector::ToSlot(index));
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(closure->literals()->literal(index), isolate);
+ Handle<Object> boilerplate(closure->feedback_vector()->Get(literal_slot),
+ isolate);
if (boilerplate->IsUndefined(isolate)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, boilerplate, JSRegExp::New(pattern, JSRegExp::Flags(flags)));
- closure->literals()->set_literal(index, *boilerplate);
+ closure->feedback_vector()->Set(literal_slot, *boilerplate);
}
return *JSRegExp::Copy(Handle<JSRegExp>::cast(boilerplate));
}
@@ -231,24 +229,25 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
+ CONVERT_ARG_HANDLE_CHECKED(BoilerplateDescription, boilerplate_description,
+ 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
- Handle<LiteralsArray> literals(closure->literals(), isolate);
+ Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
- CHECK(literals_index >= 0);
- CHECK(literals_index < literals->literals_count());
+ FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
+ CHECK(literals_slot.ToInt() < vector->slot_count());
// Check if boilerplate exists. If not, create it first.
- Handle<Object> literal_site(literals->literal(literals_index), isolate);
+ Handle<Object> literal_site(vector->Get(literals_slot), isolate);
Handle<AllocationSite> site;
Handle<JSObject> boilerplate;
if (literal_site->IsUndefined(isolate)) {
Handle<Object> raw_boilerplate;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, raw_boilerplate,
- CreateObjectLiteralBoilerplate(isolate, literals, constant_properties,
+ CreateObjectLiteralBoilerplate(isolate, vector, boilerplate_description,
should_have_fast_elements));
boilerplate = Handle<JSObject>::cast(raw_boilerplate);
@@ -259,7 +258,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
creation_context.ExitScope(site, boilerplate);
  // Update the function's literal and return the boilerplate.
- literals->set_literal(literals_index, *site);
+ vector->Set(literals_slot, *site);
} else {
site = Handle<AllocationSite>::cast(literal_site);
boilerplate =
@@ -275,16 +274,16 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
}
MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
- Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
+ Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot literals_slot,
Handle<ConstantElementsPair> elements) {
// Check if boilerplate exists. If not, create it first.
- Handle<Object> literal_site(literals->literal(literals_index), isolate);
+ Handle<Object> literal_site(vector->Get(literals_slot), isolate);
Handle<AllocationSite> site;
if (literal_site->IsUndefined(isolate)) {
Handle<Object> boilerplate;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, boilerplate,
- CreateArrayLiteralBoilerplate(isolate, literals, elements),
+ CreateArrayLiteralBoilerplate(isolate, vector, elements),
AllocationSite);
AllocationSiteCreationContext creation_context(isolate);
@@ -295,7 +294,7 @@ MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
}
creation_context.ExitScope(site, Handle<JSObject>::cast(boilerplate));
- literals->set_literal(literals_index, *site);
+ vector->Set(literals_slot, *site);
} else {
site = Handle<AllocationSite>::cast(literal_site);
}
@@ -304,13 +303,13 @@ MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
}
static MaybeHandle<JSObject> CreateArrayLiteralImpl(
- Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
+ Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot literals_slot,
Handle<ConstantElementsPair> elements, int flags) {
- CHECK(literals_index >= 0 && literals_index < literals->literals_count());
+ CHECK(literals_slot.ToInt() < vector->slot_count());
Handle<AllocationSite> site;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, site,
- GetLiteralAllocationSite(isolate, literals, literals_index, elements),
+ GetLiteralAllocationSite(isolate, vector, literals_slot, elements),
JSObject);
bool enable_mementos = (flags & ArrayLiteral::kDisableMementos) == 0;
@@ -335,10 +334,11 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
CONVERT_ARG_HANDLE_CHECKED(ConstantElementsPair, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
- Handle<LiteralsArray> literals(closure->literals(), isolate);
+ FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
+ Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
RETURN_RESULT_OR_FAILURE(
- isolate, CreateArrayLiteralImpl(isolate, literals, literals_index,
- elements, flags));
+ isolate,
+ CreateArrayLiteralImpl(isolate, vector, literals_slot, elements, flags));
}
@@ -349,11 +349,11 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) {
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ConstantElementsPair, elements, 2);
- Handle<LiteralsArray> literals(closure->literals(), isolate);
+ Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
+ FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
RETURN_RESULT_OR_FAILURE(
- isolate,
- CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
- ArrayLiteral::kShallowElements));
+ isolate, CreateArrayLiteralImpl(isolate, vector, literals_slot, elements,
+ ArrayLiteral::kShallowElements));
}
} // namespace internal
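The recurring change in this file is the container: the old flat constant_properties FixedArray interleaved keys and values ([key0, value0, key1, value1, ...], walked with a stride of 2), while BoilerplateDescription exposes one name(i)/value(i) pair per index, so the loops step by 1 and the length is no longer halved. A standalone sketch of the two layouts (simplified, assumed types):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

using Object = std::string;

// Old shape: keys and values interleaved in one flat array.
std::vector<std::pair<Object, Object>> WalkFlat(
    const std::vector<Object>& props) {
  std::vector<std::pair<Object, Object>> out;
  for (size_t i = 0; i < props.size(); i += 2)  // stride 2
    out.emplace_back(props[i], props[i + 1]);
  return out;
}

// New shape: explicit pairs behind name()/value() accessors.
struct BoilerplateDescription {
  std::vector<std::pair<Object, Object>> entries;
  size_t size() const { return entries.size(); }
  const Object& name(size_t i) const { return entries[i].first; }
  const Object& value(size_t i) const { return entries[i].second; }
};

int main() {
  auto flat = WalkFlat({"a", "1", "b", "2"});
  BoilerplateDescription desc{{{"a", "1"}, {"b", "2"}}};
  assert(flat.size() == desc.size());
  for (size_t i = 0; i < desc.size(); i++)  // stride 1
    assert(flat[i] == std::make_pair(desc.name(i), desc.value(i)));
}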
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 5bd7bde1eb..4cb4f006ff 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -9,6 +9,9 @@
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
+#include "src/counters.h"
+#include "src/double.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index f2a9761203..f36a09b410 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -5,10 +5,19 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
+RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ // TODO(gsathya): Implement ImportCall.
+ return isolate->heap()->undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 710f7b0bd2..dd24728457 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -56,6 +56,14 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
DisallowHeapAllocation no_allocation;
Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
Handle<Name> key = Handle<Name>::cast(key_obj);
+ // Get to a ThinString's referenced internalized string, but don't
+ // otherwise force internalization. We assume that internalization
+ // (which is a dictionary lookup with a non-internalized key) is
+ // about as expensive as doing the property dictionary lookup with
+ // the non-internalized key directly.
+ if (key->IsThinString()) {
+ key = handle(Handle<ThinString>::cast(key)->actual(), isolate);
+ }
if (receiver->IsJSGlobalObject()) {
// Attempt dictionary lookup.
GlobalDictionary* dictionary = receiver->global_dictionary();
@@ -578,6 +586,7 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
if (!object->IsJSObject()) return Smi::kZero;
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+  // This could have been a DCHECK, but we call this function directly from tests.
if (!js_object->map()->is_deprecated()) return Smi::kZero;
// This call must not cause lazy deopts, because it's called from deferred
// code where we can't handle lazy deopts for lack of a suitable bailout
@@ -746,7 +755,7 @@ RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
RUNTIME_FUNCTION(Runtime_CopyDataProperties) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, source, 1);
@@ -755,12 +764,46 @@ RUNTIME_FUNCTION(Runtime_CopyDataProperties) {
return isolate->heap()->undefined_value();
}
- MAYBE_RETURN(
- JSReceiver::SetOrCopyDataProperties(isolate, target, source, false),
- isolate->heap()->exception());
+ MAYBE_RETURN(JSReceiver::SetOrCopyDataProperties(isolate, target, source,
+ nullptr, false),
+ isolate->heap()->exception());
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
+ HandleScope scope(isolate);
+ DCHECK_LE(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, source, 0);
+
+ // 2. If source is undefined or null, let keys be an empty List.
+ if (source->IsUndefined(isolate) || source->IsNull(isolate)) {
+ return isolate->heap()->undefined_value();
+ }
+
+ ScopedVector<Handle<Object>> excluded_properties(args.length() - 1);
+ for (int i = 1; i < args.length(); i++) {
+ Handle<Object> property = args.at(i);
+ uint32_t property_num;
+    // Convert the string to a number if possible: computed properties can
+    // resolve to numbers, but arrive here as strings because of the
+    // %ToName() call in the desugaring for computed properties.
+ if (property->IsString() &&
+ String::cast(*property)->AsArrayIndex(&property_num)) {
+ property = isolate->factory()->NewNumberFromUint(property_num);
+ }
+
+ excluded_properties[i - 1] = property;
+ }
+
+ Handle<JSObject> target =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ MAYBE_RETURN(JSReceiver::SetOrCopyDataProperties(isolate, target, source,
+ &excluded_properties, false),
+ isolate->heap()->exception());
+ return *target;
+}
+
RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
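In Runtime_CopyDataPropertiesWithExcludedProperties the excluded keys are canonicalized before comparison: a string that spells an array index (which is what the %ToName() desugaring produces for computed keys) is converted to its numeric form, so that e.g. "0" and 0 exclude the same property. A standalone sketch of that normalization; AsArrayIndex here is a simplified stand-in for V8's String::AsArrayIndex:

#include <cassert>
#include <cstdint>
#include <string>
#include <variant>

using Key = std::variant<uint32_t, std::string>;

bool AsArrayIndex(const std::string& s, uint32_t* out) {
  // Reject empty strings, leading zeros, and anything too long to fit.
  if (s.empty() || s.size() > 10 || (s.size() > 1 && s[0] == '0')) return false;
  uint64_t v = 0;
  for (char c : s) {
    if (c < '0' || c > '9') return false;
    v = v * 10 + (c - '0');
  }
  if (v > 0xFFFFFFFEull) return false;  // max valid array index is 2^32 - 2
  *out = static_cast<uint32_t>(v);
  return true;
}

Key NormalizeExcludedKey(const std::string& name) {
  uint32_t index;
  if (AsArrayIndex(name, &index)) return index;  // numeric form wins
  return name;
}

int main() {
  assert(std::holds_alternative<uint32_t>(NormalizeExcludedKey("0")));
  assert(std::holds_alternative<std::string>(NormalizeExcludedKey("01")));
  assert(std::holds_alternative<std::string>(NormalizeExcludedKey("x")));
}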
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index ec340e5b0a..7f8419940a 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -3,8 +3,11 @@
// found in the LICENSE file.
#include "src/runtime/runtime-utils.h"
+#include "src/arguments.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/elements.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -44,7 +47,7 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
rejected_promise = isolate->GetPromiseOnStackOnThrow();
isolate->debug()->OnAsyncTaskEvent(
debug::kDebugEnqueuePromiseReject,
- isolate->debug()->NextAsyncTaskId(promise));
+ isolate->debug()->NextAsyncTaskId(promise), 0);
}
PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
return isolate->heap()->undefined_value();
@@ -71,83 +74,11 @@ RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
return isolate->heap()->undefined_value();
}
-namespace {
-
-// In an async function, reuse the existing stack related to the outer
-// Promise. Otherwise, e.g. in a direct call to then, save a new stack.
-// Promises with multiple reactions with one or more of them being async
-// functions will not get a good stack trace, as async functions require
-// different stacks from direct Promise use, but we save and restore a
-// stack once for all reactions.
-//
-// If this isn't a case of async function, we return false, otherwise
-// we set the correct id and return true.
-//
-// TODO(littledan): Improve this case.
-bool GetDebugIdForAsyncFunction(Isolate* isolate,
- Handle<PromiseReactionJobInfo> info,
- int* debug_id) {
- // deferred_promise can be Undefined, FixedArray or userland promise object.
- if (!info->deferred_promise()->IsJSPromise()) {
- return false;
- }
-
- Handle<JSPromise> deferred_promise(JSPromise::cast(info->deferred_promise()),
- isolate);
- Handle<Symbol> handled_by_symbol =
- isolate->factory()->promise_handled_by_symbol();
- Handle<Object> handled_by_promise =
- JSObject::GetDataProperty(deferred_promise, handled_by_symbol);
-
- if (!handled_by_promise->IsJSPromise()) {
- return false;
- }
-
- Handle<JSPromise> handled_by_promise_js =
- Handle<JSPromise>::cast(handled_by_promise);
- Handle<Symbol> async_stack_id_symbol =
- isolate->factory()->promise_async_stack_id_symbol();
- Handle<Object> id =
- JSObject::GetDataProperty(handled_by_promise_js, async_stack_id_symbol);
-
- // id can be Undefined or Smi.
- if (!id->IsSmi()) {
- return false;
- }
-
- *debug_id = Handle<Smi>::cast(id)->value();
- return true;
-}
-
-void SetDebugInfo(Isolate* isolate, Handle<JSPromise> promise,
- Handle<PromiseReactionJobInfo> info, int status) {
- int id = kDebugPromiseNoID;
- if (!GetDebugIdForAsyncFunction(isolate, info, &id)) {
- id = isolate->debug()->NextAsyncTaskId(promise);
- DCHECK(status != v8::Promise::kPending);
- }
- info->set_debug_id(id);
-}
-
-void EnqueuePromiseReactionJob(Isolate* isolate, Handle<JSPromise> promise,
- Handle<PromiseReactionJobInfo> info,
- int status) {
- if (isolate->debug()->is_active()) {
- SetDebugInfo(isolate, promise, info, status);
- }
-
- isolate->EnqueueMicrotask(info);
-}
-
-} // namespace
-
RUNTIME_FUNCTION(Runtime_EnqueuePromiseReactionJob) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(PromiseReactionJobInfo, info, 1);
- CONVERT_SMI_ARG_CHECKED(status, 2);
- EnqueuePromiseReactionJob(isolate, promise, info, status);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(PromiseReactionJobInfo, info, 0);
+ isolate->EnqueueMicrotask(info);
return isolate->heap()->undefined_value();
}
@@ -198,15 +129,6 @@ RUNTIME_FUNCTION(Runtime_PromiseMarkAsHandled) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_PromiseMarkHandledHint) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSPromise, promise, 0);
-
- promise->set_handled_hint(true);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_PromiseHookInit) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -228,18 +150,24 @@ RUNTIME_FUNCTION(Runtime_PromiseHookResolve) {
RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- isolate->RunPromiseHook(PromiseHookType::kBefore, promise,
- isolate->factory()->undefined_value());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ if (promise->IsJSPromise()) {
+ isolate->RunPromiseHook(PromiseHookType::kBefore,
+ Handle<JSPromise>::cast(promise),
+ isolate->factory()->undefined_value());
+ }
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_PromiseHookAfter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- isolate->RunPromiseHook(PromiseHookType::kAfter, promise,
- isolate->factory()->undefined_value());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ if (promise->IsJSPromise()) {
+ isolate->RunPromiseHook(PromiseHookType::kAfter,
+ Handle<JSPromise>::cast(promise),
+ isolate->factory()->undefined_value());
+ }
return isolate->heap()->undefined_value();
}
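The PromiseHookBefore/PromiseHookAfter changes widen the accepted argument from JSPromise to JSObject and run the hook only when the object really is a promise, presumably so that a non-promise receiver reaching these runtime calls becomes a quiet no-op instead of failing the argument check. A sketch of the pattern with stand-in types:

#include <iostream>

struct JSObject {
  virtual ~JSObject() = default;
  virtual bool IsJSPromise() const { return false; }
};
struct JSPromise : JSObject {
  bool IsJSPromise() const override { return true; }
};

void RunPromiseHook(const JSPromise&) { std::cout << "hook ran\n"; }

void PromiseHookBefore(const JSObject& maybe_promise) {
  if (maybe_promise.IsJSPromise())  // silently skip non-promises
    RunPromiseHook(static_cast<const JSPromise&>(maybe_promise));
}

int main() {
  PromiseHookBefore(JSPromise{});  // prints "hook ran"
  PromiseHookBefore(JSObject{});   // no-op
}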
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 9a489ecff8..aec9556510 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -431,6 +431,9 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
} else {
result_len = static_cast<int>(result_len_64);
}
+ if (result_len == 0) {
+ return isolate->heap()->empty_string();
+ }
int subject_pos = 0;
int result_pos = 0;
@@ -1081,15 +1084,32 @@ MUST_USE_RESULT MaybeHandle<String> StringReplaceNonGlobalRegExpWithFunction(
Factory* factory = isolate->factory();
Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
- // TODO(jgruber): This is a pattern we could refactor.
+ const int flags = regexp->GetFlags();
+
+ DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
+ DCHECK_EQ(flags & JSRegExp::kGlobal, 0);
+
+ // TODO(jgruber): This should be an easy port to CSA with massive payback.
+
+ const bool sticky = (flags & JSRegExp::kSticky) != 0;
+ uint32_t last_index = 0;
+ if (sticky) {
+ Handle<Object> last_index_obj(regexp->LastIndex(), isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, last_index_obj,
+ Object::ToLength(isolate, last_index_obj),
+ String);
+ last_index = PositiveNumberToUint32(*last_index_obj);
+
+ if (static_cast<int>(last_index) > subject->length()) last_index = 0;
+ }
+
Handle<Object> match_indices_obj;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, match_indices_obj,
- RegExpImpl::Exec(regexp, subject, 0, last_match_info), String);
+ RegExpImpl::Exec(regexp, subject, last_index, last_match_info), String);
if (match_indices_obj->IsNull(isolate)) {
- RETURN_ON_EXCEPTION(isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0),
- String);
+ if (sticky) regexp->SetLastIndex(0);
return subject;
}
@@ -1099,6 +1119,8 @@ MUST_USE_RESULT MaybeHandle<String> StringReplaceNonGlobalRegExpWithFunction(
const int index = match_indices->Capture(0);
const int end_of_match = match_indices->Capture(1);
+ if (sticky) regexp->SetLastIndex(end_of_match);
+
IncrementalStringBuilder builder(isolate);
builder.AppendString(factory->NewSubString(subject, 0, index));
@@ -1150,10 +1172,9 @@ MUST_USE_RESULT MaybeHandle<String> RegExpReplace(Isolate* isolate,
Handle<Object> replace_obj) {
Factory* factory = isolate->factory();
- // TODO(jgruber): We need the even stricter guarantee of an unmodified
- // JSRegExp map here for access to GetFlags to be legal.
const int flags = regexp->GetFlags();
const bool global = (flags & JSRegExp::kGlobal) != 0;
+ const bool sticky = (flags & JSRegExp::kSticky) != 0;
// Functional fast-paths are dispatched directly by replace builtin.
DCHECK(!replace_obj->IsCallable());
@@ -1168,14 +1189,24 @@ MUST_USE_RESULT MaybeHandle<String> RegExpReplace(Isolate* isolate,
if (!global) {
// Non-global regexp search, string replace.
+ uint32_t last_index = 0;
+ if (sticky) {
+ Handle<Object> last_index_obj(regexp->LastIndex(), isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, last_index_obj,
+ Object::ToLength(isolate, last_index_obj),
+ String);
+ last_index = PositiveNumberToUint32(*last_index_obj);
+
+ if (static_cast<int>(last_index) > string->length()) last_index = 0;
+ }
+
Handle<Object> match_indices_obj;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, match_indices_obj,
- RegExpImpl::Exec(regexp, string, 0, last_match_info), String);
+ RegExpImpl::Exec(regexp, string, last_index, last_match_info), String);
if (match_indices_obj->IsNull(isolate)) {
- RETURN_ON_EXCEPTION(
- isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0), String);
+ if (sticky) regexp->SetLastIndex(0);
return string;
}
@@ -1184,6 +1215,8 @@ MUST_USE_RESULT MaybeHandle<String> RegExpReplace(Isolate* isolate,
const int start_index = match_indices->Capture(0);
const int end_index = match_indices->Capture(1);
+ if (sticky) regexp->SetLastIndex(end_index);
+
IncrementalStringBuilder builder(isolate);
builder.AppendString(factory->NewSubString(string, 0, start_index));
@@ -1265,6 +1298,8 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, replace, 2);
+ DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
+
RETURN_RESULT_OR_FAILURE(isolate, StringReplaceNonGlobalRegExpWithFunction(
isolate, subject, regexp, replace));
}
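The sticky-flag handling added to both replace paths follows the RegExp lastIndex protocol: read lastIndex, coerce it with ToLength, clamp out-of-range values to 0, require the match to start exactly there, reset lastIndex to 0 on failure, and advance it past the match on success. A standalone sketch of the same protocol, using a literal pattern and std::string in place of a real regexp:

#include <cassert>
#include <cstdint>
#include <string>

struct StickyRegExp {
  std::string pattern;
  uint32_t last_index = 0;  // stands in for regexp->LastIndex()

  // Returns the match start, or std::string::npos on failure.
  size_t Exec(const std::string& subject) {
    uint32_t start = last_index;
    if (start > subject.size()) start = 0;  // clamp, as in RegExpReplace
    size_t pos = subject.find(pattern, start);
    if (pos != start) {   // sticky: the match must begin at last_index
      last_index = 0;     // SetLastIndex(0) on failure
      return std::string::npos;
    }
    last_index = static_cast<uint32_t>(pos + pattern.size());  // end_of_match
    return pos;
  }
};

int main() {
  StickyRegExp re{"ab"};
  assert(re.Exec("abab") == 0 && re.last_index == 2);
  assert(re.Exec("abab") == 2 && re.last_index == 4);
  assert(re.Exec("abab") == std::string::npos && re.last_index == 0);
}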
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index de121ecfb5..76e7c2b186 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -46,7 +46,7 @@ Object* DeclareGlobal(
Handle<Object> value, PropertyAttributes attr, bool is_var,
bool is_function_declaration, RedeclarationType redeclaration_type,
Handle<FeedbackVector> feedback_vector = Handle<FeedbackVector>(),
- FeedbackVectorSlot slot = FeedbackVectorSlot::Invalid()) {
+ FeedbackSlot slot = FeedbackSlot::Invalid()) {
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
ScriptContextTable::LookupResult lookup;
@@ -86,8 +86,7 @@ Object* DeclareGlobal(
// Check whether we can reconfigure the existing property into a
// function.
- PropertyDetails old_details = it.property_details();
- if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
+ if (old_attributes & READ_ONLY || old_attributes & DONT_ENUM ||
(it.state() == LookupIterator::ACCESSOR)) {
// ECMA-262 section 15.1.11 GlobalDeclarationInstantiation 5.d:
// If hasRestrictedGlobal is true, throw a SyntaxError exception.
@@ -116,7 +115,8 @@ Object* DeclareGlobal(
RETURN_FAILURE_ON_EXCEPTION(
isolate, JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attr));
- if (!feedback_vector.is_null()) {
+ if (!feedback_vector.is_null() &&
+ it.state() != LookupIterator::State::INTERCEPTOR) {
DCHECK_EQ(*global, *it.GetHolder<Object>());
  // Preinitialize the feedback slot if the global object does not have
  // a named interceptor or the interceptor is not masking.
@@ -137,10 +137,11 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
// Traverse the name/value pairs and set the properties.
int length = declarations->length();
- FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 3, {
+ FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 4, {
Handle<String> name(String::cast(declarations->get(i)), isolate);
- FeedbackVectorSlot slot(Smi::cast(declarations->get(i + 1))->value());
- Handle<Object> initial_value(declarations->get(i + 2), isolate);
+ FeedbackSlot slot(Smi::cast(declarations->get(i + 1))->value());
+ Handle<Object> possibly_literal_slot(declarations->get(i + 2), isolate);
+ Handle<Object> initial_value(declarations->get(i + 3), isolate);
bool is_var = initial_value->IsUndefined(isolate);
bool is_function = initial_value->IsSharedFunctionInfo();
@@ -148,12 +149,16 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
Handle<Object> value;
if (is_function) {
+ DCHECK(possibly_literal_slot->IsSmi());
// Copy the function and update its context. Use it as value.
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(initial_value);
+ FeedbackSlot literals_slot(Smi::cast(*possibly_literal_slot)->value());
+ Handle<Cell> literals(Cell::cast(feedback_vector->Get(literals_slot)),
+ isolate);
Handle<JSFunction> function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
- TENURED);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, context, literals, TENURED);
value = function;
} else {
value = isolate->factory()->undefined_value();
@@ -584,12 +589,29 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
- Object** parameters = reinterpret_cast<Object**>(args[1]);
- CONVERT_SMI_ARG_CHECKED(argument_count, 2);
+ StackFrameIterator iterator(isolate);
+
+ // Stub/interpreter handler frame
+ iterator.Advance();
+ DCHECK(iterator.frame()->type() == StackFrame::STUB);
+
+ // Function frame
+ iterator.Advance();
+ JavaScriptFrame* function_frame = JavaScriptFrame::cast(iterator.frame());
+ DCHECK(function_frame->is_java_script());
+ int argc = function_frame->GetArgumentsLength();
+ Address fp = function_frame->fp();
+ if (function_frame->has_adapted_arguments()) {
+ iterator.Advance();
+ fp = iterator.frame()->fp();
+ }
+
+ Object** parameters = reinterpret_cast<Object**>(
+ fp + argc * kPointerSize + StandardFrameConstants::kCallerSPOffset);
ParameterArguments argument_getter(parameters);
- return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
+ return *NewSloppyArguments(isolate, callee, argument_getter, argc);
}
RUNTIME_FUNCTION(Runtime_NewArgumentsElements) {
@@ -612,10 +634,14 @@ RUNTIME_FUNCTION(Runtime_NewClosure) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+ CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
+ CONVERT_SMI_ARG_CHECKED(index, 2);
Handle<Context> context(isolate->context(), isolate);
+ FeedbackSlot slot = FeedbackVector::ToSlot(index);
+ Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
Handle<JSFunction> function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
- NOT_TENURED);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, context, vector_cell, NOT_TENURED);
return *function;
}
@@ -624,12 +650,16 @@ RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+ CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
+ CONVERT_SMI_ARG_CHECKED(index, 2);
Handle<Context> context(isolate->context(), isolate);
+ FeedbackSlot slot = FeedbackVector::ToSlot(index);
+ Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
// The caller ensures that we pretenure closures that are assigned
// directly to properties.
Handle<JSFunction> function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
- TENURED);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, context, vector_cell, TENURED);
return *function;
}
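DeclareGlobals now walks declaration records of four entries instead of three: {name, feedback_slot, literal_slot, initial_value}, where the new literal_slot lets function declarations fetch their literals Cell from the feedback vector. A sketch of decoding that flat layout (the types are simplified stand-ins):

#include <cassert>
#include <string>
#include <variant>
#include <vector>

using Entry = std::variant<std::string, int>;  // name, or a Smi-like value

struct Declaration {
  std::string name;
  int feedback_slot;
  int literal_slot;     // the new third entry
  Entry initial_value;  // SharedFunctionInfo or undefined in V8
};

std::vector<Declaration> DecodeDeclarations(const std::vector<Entry>& flat) {
  std::vector<Declaration> out;
  for (size_t i = 0; i < flat.size(); i += 4) {  // was i += 3
    out.push_back({std::get<std::string>(flat[i]), std::get<int>(flat[i + 1]),
                   std::get<int>(flat[i + 2]), flat[i + 3]});
  }
  return out;
}

int main() {
  auto decls = DecodeDeclarations({Entry{"f"}, Entry{0}, Entry{1}, Entry{2}});
  assert(decls.size() == 1 && decls[0].literal_slot == 1);
}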
diff --git a/deps/v8/src/runtime/runtime-simd.cc b/deps/v8/src/runtime/runtime-simd.cc
deleted file mode 100644
index 067e9d680d..0000000000
--- a/deps/v8/src/runtime/runtime-simd.cc
+++ /dev/null
@@ -1,1016 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/base/macros.h"
-#include "src/conversions.h"
-#include "src/factory.h"
-#include "src/objects-inl.h"
-
-// Implement Single Instruction Multiple Data (SIMD) operations as defined in
-// the SIMD.js draft spec:
-// http://littledan.github.io/simd.html
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// Functions to convert Numbers to SIMD component types.
-
-template <typename T, typename F>
-static bool CanCast(F from) {
- // A float can't represent 2^31 - 1 or 2^32 - 1 exactly, so promote the limits
- // to double. Otherwise, the limit is truncated and numbers like 2^31 or 2^32
- // get through, causing any static_cast to be undefined.
- from = trunc(from);
- return from >= static_cast<double>(std::numeric_limits<T>::min()) &&
- from <= static_cast<double>(std::numeric_limits<T>::max());
-}
-
-
-// Explicitly specialize for conversions to float, which always succeed.
-template <>
-bool CanCast<float>(int32_t from) {
- return true;
-}
-
-
-template <>
-bool CanCast<float>(uint32_t from) {
- return true;
-}
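The comment above CanCast deserves a worked example: float cannot represent 2^31 - 1 exactly (the nearest float is 2^31), so comparing against float limits would let 2^31 slip through and make the later static_cast undefined; promoting the limits to double keeps the boundary exact. A standalone version:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

template <typename T>
bool CanCast(double from) {
  from = std::trunc(from);
  // Promote the limits to double so the comparison is exact.
  return from >= static_cast<double>(std::numeric_limits<T>::min()) &&
         from <= static_cast<double>(std::numeric_limits<T>::max());
}

int main() {
  // float(INT32_MAX) rounds up to 2147483648.0f, i.e. exactly 2^31:
  assert(static_cast<float>(std::numeric_limits<int32_t>::max()) ==
         2147483648.0f);
  // With double limits the boundary is handled correctly:
  assert(CanCast<int32_t>(2147483647.0));
  assert(!CanCast<int32_t>(2147483648.0));
}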
-
-
-template <typename T>
-static T ConvertNumber(double number);
-
-
-template <>
-float ConvertNumber<float>(double number) {
- return DoubleToFloat32(number);
-}
-
-
-template <>
-int32_t ConvertNumber<int32_t>(double number) {
- return DoubleToInt32(number);
-}
-
-
-template <>
-uint32_t ConvertNumber<uint32_t>(double number) {
- return DoubleToUint32(number);
-}
-
-
-template <>
-int16_t ConvertNumber<int16_t>(double number) {
- return static_cast<int16_t>(DoubleToInt32(number));
-}
-
-
-template <>
-uint16_t ConvertNumber<uint16_t>(double number) {
- return static_cast<uint16_t>(DoubleToUint32(number));
-}
-
-
-template <>
-int8_t ConvertNumber<int8_t>(double number) {
- return static_cast<int8_t>(DoubleToInt32(number));
-}
-
-
-template <>
-uint8_t ConvertNumber<uint8_t>(double number) {
- return static_cast<uint8_t>(DoubleToUint32(number));
-}
-
-
-// TODO(bbudge): Make this consistent with SIMD instruction results.
-inline float RecipApprox(float a) { return 1.0f / a; }
-
-
-// TODO(bbudge): Make this consistent with SIMD instruction results.
-inline float RecipSqrtApprox(float a) { return 1.0f / std::sqrt(a); }
-
-
-// Saturating addition for int16_t and int8_t.
-template <typename T>
-inline T AddSaturate(T a, T b) {
- const T max = std::numeric_limits<T>::max();
- const T min = std::numeric_limits<T>::min();
- int32_t result = a + b;
- if (result > max) return max;
- if (result < min) return min;
- return result;
-}
-
-
-// Saturating subtraction for int16_t and int8_t.
-template <typename T>
-inline T SubSaturate(T a, T b) {
- const T max = std::numeric_limits<T>::max();
- const T min = std::numeric_limits<T>::min();
- int32_t result = a - b;
- if (result > max) return max;
- if (result < min) return min;
- return result;
-}
-
-
-inline float Min(float a, float b) {
- if (a < b) return a;
- if (a > b) return b;
- if (a == b) return std::signbit(a) ? a : b;
- return std::numeric_limits<float>::quiet_NaN();
-}
-
-
-inline float Max(float a, float b) {
- if (a > b) return a;
- if (a < b) return b;
- if (a == b) return std::signbit(b) ? a : b;
- return std::numeric_limits<float>::quiet_NaN();
-}
-
-
-inline float MinNumber(float a, float b) {
- if (std::isnan(a)) return b;
- if (std::isnan(b)) return a;
- return Min(a, b);
-}
-
-
-inline float MaxNumber(float a, float b) {
- if (std::isnan(a)) return b;
- if (std::isnan(b)) return a;
- return Max(a, b);
-}
-
-} // namespace
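The saturating helpers deleted above avoid wrap-around by widening to int32_t before clamping to the lane type's range, which is safe because the widest lanes they serve are 16-bit. A standalone version:

#include <cassert>
#include <cstdint>
#include <limits>

template <typename T>
T AddSaturate(T a, T b) {
  const int32_t max = std::numeric_limits<T>::max();
  const int32_t min = std::numeric_limits<T>::min();
  int32_t result = int32_t{a} + int32_t{b};  // widen: the sum cannot wrap
  if (result > max) return static_cast<T>(max);
  if (result < min) return static_cast<T>(min);
  return static_cast<T>(result);
}

int main() {
  assert(AddSaturate<int8_t>(120, 100) == 127);     // clamps at INT8_MAX
  assert(AddSaturate<int8_t>(-120, -100) == -128);  // clamps at INT8_MIN
  assert(AddSaturate<uint8_t>(200, 100) == 255);    // clamps at UINT8_MAX
}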
-
-//-------------------------------------------------------------------
-
-// SIMD helper functions.
-
-RUNTIME_FUNCTION(Runtime_IsSimdValue) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(args[0]->IsSimd128Value());
-}
-
-
-//-------------------------------------------------------------------
-
-// Utility macros.
-
-// TODO(gdeepti): Fix to use ToNumber conversion once polyfill is updated.
-#define CONVERT_SIMD_LANE_ARG_CHECKED(name, index, lanes) \
- Handle<Object> name_object = args.at(index); \
- if (!name_object->IsNumber()) { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex)); \
- } \
- double number = name_object->Number(); \
- if (number < 0 || number >= lanes || !IsInt32Double(number)) { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex)); \
- } \
- uint32_t name = static_cast<uint32_t>(number);
-
-#define CONVERT_SIMD_ARG_HANDLE_THROW(Type, name, index) \
- Handle<Type> name; \
- if (args[index]->Is##Type()) { \
- name = args.at<Type>(index); \
- } else { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, NewTypeError(MessageTemplate::kInvalidSimdOperation)); \
- }
-
-#define SIMD_UNARY_OP(type, lane_type, lane_count, op, result) \
- static const int kLaneCount = lane_count; \
- DCHECK_EQ(1, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = op(a->get_lane(i)); \
- } \
- Handle<type> result = isolate->factory()->New##type(lanes);
-
-#define SIMD_BINARY_OP(type, lane_type, lane_count, op, result) \
- static const int kLaneCount = lane_count; \
- DCHECK_EQ(2, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = op(a->get_lane(i), b->get_lane(i)); \
- } \
- Handle<type> result = isolate->factory()->New##type(lanes);
-
-#define SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, op, result) \
- static const int kLaneCount = lane_count; \
- DCHECK_EQ(2, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
- bool lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = a->get_lane(i) op b->get_lane(i); \
- } \
- Handle<bool_type> result = isolate->factory()->New##bool_type(lanes);
-
-//-------------------------------------------------------------------
-
-// Common functions.
-
-#define GET_NUMERIC_ARG(lane_type, name, index) \
- Handle<Object> a; \
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, a, \
- Object::ToNumber(args.at(index))); \
- name = ConvertNumber<lane_type>(a->Number());
-
-#define GET_BOOLEAN_ARG(lane_type, name, index) \
- name = args[index]->BooleanValue();
-
-#define SIMD_ALL_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, 4, NewNumber, GET_NUMERIC_ARG) \
- FUNCTION(Int32x4, int32_t, 4, NewNumber, GET_NUMERIC_ARG) \
- FUNCTION(Uint32x4, uint32_t, 4, NewNumber, GET_NUMERIC_ARG) \
- FUNCTION(Bool32x4, bool, 4, ToBoolean, GET_BOOLEAN_ARG) \
- FUNCTION(Int16x8, int16_t, 8, NewNumber, GET_NUMERIC_ARG) \
- FUNCTION(Uint16x8, uint16_t, 8, NewNumber, GET_NUMERIC_ARG) \
- FUNCTION(Bool16x8, bool, 8, ToBoolean, GET_BOOLEAN_ARG) \
- FUNCTION(Int8x16, int8_t, 16, NewNumber, GET_NUMERIC_ARG) \
- FUNCTION(Uint8x16, uint8_t, 16, NewNumber, GET_NUMERIC_ARG) \
- FUNCTION(Bool8x16, bool, 16, ToBoolean, GET_BOOLEAN_ARG)
-
-#define SIMD_CREATE_FUNCTION(type, lane_type, lane_count, extract, replace) \
- RUNTIME_FUNCTION(Runtime_Create##type) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK(args.length() == kLaneCount); \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- replace(lane_type, lanes[i], i) \
- } \
- return *isolate->factory()->New##type(lanes); \
- }
-
-#define SIMD_EXTRACT_FUNCTION(type, lane_type, lane_count, extract, replace) \
- RUNTIME_FUNCTION(Runtime_##type##ExtractLane) { \
- HandleScope scope(isolate); \
- DCHECK_EQ(2, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, lane_count); \
- return *isolate->factory()->extract(a->get_lane(lane)); \
- }
-
-#define SIMD_REPLACE_FUNCTION(type, lane_type, lane_count, extract, replace) \
- RUNTIME_FUNCTION(Runtime_##type##ReplaceLane) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK_EQ(3, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, simd, 0); \
- CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, kLaneCount); \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = simd->get_lane(i); \
- } \
- replace(lane_type, lanes[lane], 2); \
- Handle<type> result = isolate->factory()->New##type(lanes); \
- return *result; \
- }
-
-#define SIMD_CHECK_FUNCTION(type, lane_type, lane_count, extract, replace) \
- RUNTIME_FUNCTION(Runtime_##type##Check) { \
- HandleScope scope(isolate); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- return *a; \
- }
-
-#define SIMD_SWIZZLE_FUNCTION(type, lane_type, lane_count, extract, replace) \
- RUNTIME_FUNCTION(Runtime_##type##Swizzle) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 1 + kLaneCount); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- CONVERT_SIMD_LANE_ARG_CHECKED(index, i + 1, kLaneCount); \
- lanes[i] = a->get_lane(index); \
- } \
- Handle<type> result = isolate->factory()->New##type(lanes); \
- return *result; \
- }
-
-#define SIMD_SHUFFLE_FUNCTION(type, lane_type, lane_count, extract, replace) \
- RUNTIME_FUNCTION(Runtime_##type##Shuffle) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 2 + kLaneCount); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- CONVERT_SIMD_LANE_ARG_CHECKED(index, i + 2, kLaneCount * 2); \
- lanes[i] = index < kLaneCount ? a->get_lane(index) \
- : b->get_lane(index - kLaneCount); \
- } \
- Handle<type> result = isolate->factory()->New##type(lanes); \
- return *result; \
- }
-
-SIMD_ALL_TYPES(SIMD_CREATE_FUNCTION)
-SIMD_ALL_TYPES(SIMD_EXTRACT_FUNCTION)
-SIMD_ALL_TYPES(SIMD_REPLACE_FUNCTION)
-SIMD_ALL_TYPES(SIMD_CHECK_FUNCTION)
-SIMD_ALL_TYPES(SIMD_SWIZZLE_FUNCTION)
-SIMD_ALL_TYPES(SIMD_SHUFFLE_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Float-only functions.
-
-#define SIMD_ABS_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Abs) { \
- HandleScope scope(isolate); \
- SIMD_UNARY_OP(type, lane_type, lane_count, std::abs, result); \
- return *result; \
- }
-
-#define SIMD_SQRT_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Sqrt) { \
- HandleScope scope(isolate); \
- SIMD_UNARY_OP(type, lane_type, lane_count, std::sqrt, result); \
- return *result; \
- }
-
-#define SIMD_RECIP_APPROX_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##RecipApprox) { \
- HandleScope scope(isolate); \
- SIMD_UNARY_OP(type, lane_type, lane_count, RecipApprox, result); \
- return *result; \
- }
-
-#define SIMD_RECIP_SQRT_APPROX_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##RecipSqrtApprox) { \
- HandleScope scope(isolate); \
- SIMD_UNARY_OP(type, lane_type, lane_count, RecipSqrtApprox, result); \
- return *result; \
- }
-
-#define BINARY_DIV(a, b) (a) / (b)
-#define SIMD_DIV_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Div) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_DIV, result); \
- return *result; \
- }
-
-#define SIMD_MINNUM_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##MinNum) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, MinNumber, result); \
- return *result; \
- }
-
-#define SIMD_MAXNUM_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##MaxNum) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, MaxNumber, result); \
- return *result; \
- }
-
-SIMD_ABS_FUNCTION(Float32x4, float, 4)
-SIMD_SQRT_FUNCTION(Float32x4, float, 4)
-SIMD_RECIP_APPROX_FUNCTION(Float32x4, float, 4)
-SIMD_RECIP_SQRT_APPROX_FUNCTION(Float32x4, float, 4)
-SIMD_DIV_FUNCTION(Float32x4, float, 4)
-SIMD_MINNUM_FUNCTION(Float32x4, float, 4)
-SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
-
-//-------------------------------------------------------------------
-
-// Int-only functions.
-
-#define SIMD_INT_TYPES(FUNCTION) \
- FUNCTION(Int32x4, int32_t, 32, 4) \
- FUNCTION(Int16x8, int16_t, 16, 8) \
- FUNCTION(Int8x16, int8_t, 8, 16)
-
-#define SIMD_UINT_TYPES(FUNCTION) \
- FUNCTION(Uint32x4, uint32_t, 32, 4) \
- FUNCTION(Uint16x8, uint16_t, 16, 8) \
- FUNCTION(Uint8x16, uint8_t, 8, 16)
-
-#define CONVERT_SHIFT_ARG_CHECKED(name, index) \
- Handle<Object> name_object = args.at(index); \
- if (!name_object->IsNumber()) { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, NewTypeError(MessageTemplate::kInvalidSimdOperation)); \
- } \
- int32_t signed_shift = 0; \
- args[index]->ToInt32(&signed_shift); \
- uint32_t name = bit_cast<uint32_t>(signed_shift);
-
-#define SIMD_LSL_FUNCTION(type, lane_type, lane_bits, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##ShiftLeftByScalar) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK_EQ(2, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
- lane_type lanes[kLaneCount] = {0}; \
- shift &= lane_bits - 1; \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = a->get_lane(i) << shift; \
- } \
- Handle<type> result = isolate->factory()->New##type(lanes); \
- return *result; \
- }
-
-#define SIMD_LSR_FUNCTION(type, lane_type, lane_bits, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK_EQ(2, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
- lane_type lanes[kLaneCount] = {0}; \
- shift &= lane_bits - 1; \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = static_cast<lane_type>(bit_cast<lane_type>(a->get_lane(i)) >> \
- shift); \
- } \
- Handle<type> result = isolate->factory()->New##type(lanes); \
- return *result; \
- }
-
-#define SIMD_ASR_FUNCTION(type, lane_type, lane_bits, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK_EQ(2, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
- shift &= lane_bits - 1; \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- int64_t shifted = static_cast<int64_t>(a->get_lane(i)) >> shift; \
- lanes[i] = static_cast<lane_type>(shifted); \
- } \
- Handle<type> result = isolate->factory()->New##type(lanes); \
- return *result; \
- }
-
-SIMD_INT_TYPES(SIMD_LSL_FUNCTION)
-SIMD_UINT_TYPES(SIMD_LSL_FUNCTION)
-SIMD_INT_TYPES(SIMD_ASR_FUNCTION)
-SIMD_UINT_TYPES(SIMD_LSR_FUNCTION)
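The shift macros mask the scalar count to the lane width (shift &= lane_bits - 1) and then pick a logical right shift for unsigned lanes and an arithmetic one for signed lanes. A standalone sketch of those semantics:

#include <cassert>
#include <cstdint>

template <typename T>
T ShiftRightByScalar(T value, uint32_t shift) {
  constexpr uint32_t lane_bits = sizeof(T) * 8;
  shift &= lane_bits - 1;  // out-of-range counts wrap, as in the macros
  // Widening to int64_t makes this arithmetic (sign-extending) for signed T
  // and logical for unsigned T, since unsigned values widen non-negative.
  int64_t widened = static_cast<int64_t>(value);
  return static_cast<T>(widened >> shift);
}

int main() {
  assert(ShiftRightByScalar<int8_t>(-128, 1) == -64);    // arithmetic
  assert(ShiftRightByScalar<uint8_t>(0x80, 1) == 0x40);  // logical
  assert(ShiftRightByScalar<int8_t>(-128, 9) == -64);    // 9 & 7 == 1
}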
-
-//-------------------------------------------------------------------
-
-// Bool-only functions.
-
-#define SIMD_BOOL_TYPES(FUNCTION) \
- FUNCTION(Bool32x4, 4) \
- FUNCTION(Bool16x8, 8) \
- FUNCTION(Bool8x16, 16)
-
-#define SIMD_ANY_FUNCTION(type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##AnyTrue) { \
- HandleScope scope(isolate); \
- DCHECK_EQ(1, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- bool result = false; \
- for (int i = 0; i < lane_count; i++) { \
- if (a->get_lane(i)) { \
- result = true; \
- break; \
- } \
- } \
- return isolate->heap()->ToBoolean(result); \
- }
-
-#define SIMD_ALL_FUNCTION(type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##AllTrue) { \
- HandleScope scope(isolate); \
- DCHECK_EQ(1, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- bool result = true; \
- for (int i = 0; i < lane_count; i++) { \
- if (!a->get_lane(i)) { \
- result = false; \
- break; \
- } \
- } \
- return isolate->heap()->ToBoolean(result); \
- }
-
-SIMD_BOOL_TYPES(SIMD_ANY_FUNCTION)
-SIMD_BOOL_TYPES(SIMD_ALL_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Small Int-only functions.
-
-#define SIMD_SMALL_INT_TYPES(FUNCTION) \
- FUNCTION(Int16x8, int16_t, 8) \
- FUNCTION(Uint16x8, uint16_t, 8) \
- FUNCTION(Int8x16, int8_t, 16) \
- FUNCTION(Uint8x16, uint8_t, 16)
-
-#define SIMD_ADD_SATURATE_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##AddSaturate) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, AddSaturate, result); \
- return *result; \
- }
-
-#define BINARY_SUB(a, b) (a) - (b)
-#define SIMD_SUB_SATURATE_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##SubSaturate) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, SubSaturate, result); \
- return *result; \
- }
-
-SIMD_SMALL_INT_TYPES(SIMD_ADD_SATURATE_FUNCTION)
-SIMD_SMALL_INT_TYPES(SIMD_SUB_SATURATE_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Numeric functions.
-
-#define SIMD_NUMERIC_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, 4) \
- FUNCTION(Int32x4, int32_t, 4) \
- FUNCTION(Uint32x4, uint32_t, 4) \
- FUNCTION(Int16x8, int16_t, 8) \
- FUNCTION(Uint16x8, uint16_t, 8) \
- FUNCTION(Int8x16, int8_t, 16) \
- FUNCTION(Uint8x16, uint8_t, 16)
-
-#define BINARY_ADD(a, b) (a) + (b)
-#define SIMD_ADD_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Add) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_ADD, result); \
- return *result; \
- }
-
-#define BINARY_SUB(a, b) (a) - (b)
-#define SIMD_SUB_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Sub) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_SUB, result); \
- return *result; \
- }
-
-#define BINARY_MUL(a, b) (a) * (b)
-#define SIMD_MUL_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Mul) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_MUL, result); \
- return *result; \
- }
-
-#define SIMD_MIN_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Min) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, Min, result); \
- return *result; \
- }
-
-#define SIMD_MAX_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Max) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, Max, result); \
- return *result; \
- }
-
-SIMD_NUMERIC_TYPES(SIMD_ADD_FUNCTION)
-SIMD_NUMERIC_TYPES(SIMD_SUB_FUNCTION)
-SIMD_NUMERIC_TYPES(SIMD_MUL_FUNCTION)
-SIMD_NUMERIC_TYPES(SIMD_MIN_FUNCTION)
-SIMD_NUMERIC_TYPES(SIMD_MAX_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Relational functions.
-
-#define SIMD_RELATIONAL_TYPES(FUNCTION) \
- FUNCTION(Float32x4, Bool32x4, 4) \
- FUNCTION(Int32x4, Bool32x4, 4) \
- FUNCTION(Uint32x4, Bool32x4, 4) \
- FUNCTION(Int16x8, Bool16x8, 8) \
- FUNCTION(Uint16x8, Bool16x8, 8) \
- FUNCTION(Int8x16, Bool8x16, 16) \
- FUNCTION(Uint8x16, Bool8x16, 16)
-
-#define SIMD_EQUALITY_TYPES(FUNCTION) \
- SIMD_RELATIONAL_TYPES(FUNCTION) \
- FUNCTION(Bool32x4, Bool32x4, 4) \
- FUNCTION(Bool16x8, Bool16x8, 8) \
- FUNCTION(Bool8x16, Bool8x16, 16)
-
-#define SIMD_EQUAL_FUNCTION(type, bool_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Equal) { \
- HandleScope scope(isolate); \
- SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, ==, result); \
- return *result; \
- }
-
-#define SIMD_NOT_EQUAL_FUNCTION(type, bool_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##NotEqual) { \
- HandleScope scope(isolate); \
- SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, !=, result); \
- return *result; \
- }
-
-SIMD_EQUALITY_TYPES(SIMD_EQUAL_FUNCTION)
-SIMD_EQUALITY_TYPES(SIMD_NOT_EQUAL_FUNCTION)
-
-#define SIMD_LESS_THAN_FUNCTION(type, bool_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##LessThan) { \
- HandleScope scope(isolate); \
- SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, <, result); \
- return *result; \
- }
-
-#define SIMD_LESS_THAN_OR_EQUAL_FUNCTION(type, bool_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##LessThanOrEqual) { \
- HandleScope scope(isolate); \
- SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, <=, result); \
- return *result; \
- }
-
-#define SIMD_GREATER_THAN_FUNCTION(type, bool_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##GreaterThan) { \
- HandleScope scope(isolate); \
- SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, >, result); \
- return *result; \
- }
-
-#define SIMD_GREATER_THAN_OR_EQUAL_FUNCTION(type, bool_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##GreaterThanOrEqual) { \
- HandleScope scope(isolate); \
- SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, >=, result); \
- return *result; \
- }
-
-SIMD_RELATIONAL_TYPES(SIMD_LESS_THAN_FUNCTION)
-SIMD_RELATIONAL_TYPES(SIMD_LESS_THAN_OR_EQUAL_FUNCTION)
-SIMD_RELATIONAL_TYPES(SIMD_GREATER_THAN_FUNCTION)
-SIMD_RELATIONAL_TYPES(SIMD_GREATER_THAN_OR_EQUAL_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Logical functions.
-
-#define SIMD_LOGICAL_TYPES(FUNCTION) \
- FUNCTION(Int32x4, int32_t, 4, _INT) \
- FUNCTION(Uint32x4, uint32_t, 4, _INT) \
- FUNCTION(Int16x8, int16_t, 8, _INT) \
- FUNCTION(Uint16x8, uint16_t, 8, _INT) \
- FUNCTION(Int8x16, int8_t, 16, _INT) \
- FUNCTION(Uint8x16, uint8_t, 16, _INT) \
- FUNCTION(Bool32x4, bool, 4, _BOOL) \
- FUNCTION(Bool16x8, bool, 8, _BOOL) \
- FUNCTION(Bool8x16, bool, 16, _BOOL)
-
-#define BINARY_AND_INT(a, b) (a) & (b)
-#define BINARY_AND_BOOL(a, b) (a) && (b)
-#define SIMD_AND_FUNCTION(type, lane_type, lane_count, op) \
- RUNTIME_FUNCTION(Runtime_##type##And) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_AND##op, result); \
- return *result; \
- }
-
-#define BINARY_OR_INT(a, b) (a) | (b)
-#define BINARY_OR_BOOL(a, b) (a) || (b)
-#define SIMD_OR_FUNCTION(type, lane_type, lane_count, op) \
- RUNTIME_FUNCTION(Runtime_##type##Or) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_OR##op, result); \
- return *result; \
- }
-
-#define BINARY_XOR_INT(a, b) (a) ^ (b)
-#define BINARY_XOR_BOOL(a, b) (a) != (b)
-#define SIMD_XOR_FUNCTION(type, lane_type, lane_count, op) \
- RUNTIME_FUNCTION(Runtime_##type##Xor) { \
- HandleScope scope(isolate); \
- SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_XOR##op, result); \
- return *result; \
- }
-
-#define UNARY_NOT_INT ~
-#define UNARY_NOT_BOOL !
-#define SIMD_NOT_FUNCTION(type, lane_type, lane_count, op) \
- RUNTIME_FUNCTION(Runtime_##type##Not) { \
- HandleScope scope(isolate); \
- SIMD_UNARY_OP(type, lane_type, lane_count, UNARY_NOT##op, result); \
- return *result; \
- }
-
-SIMD_LOGICAL_TYPES(SIMD_AND_FUNCTION)
-SIMD_LOGICAL_TYPES(SIMD_OR_FUNCTION)
-SIMD_LOGICAL_TYPES(SIMD_XOR_FUNCTION)
-SIMD_LOGICAL_TYPES(SIMD_NOT_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Select functions.
-
-#define SIMD_SELECT_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, Bool32x4, 4) \
- FUNCTION(Int32x4, int32_t, Bool32x4, 4) \
- FUNCTION(Uint32x4, uint32_t, Bool32x4, 4) \
- FUNCTION(Int16x8, int16_t, Bool16x8, 8) \
- FUNCTION(Uint16x8, uint16_t, Bool16x8, 8) \
- FUNCTION(Int8x16, int8_t, Bool8x16, 16) \
- FUNCTION(Uint8x16, uint8_t, Bool8x16, 16)
-
-#define SIMD_SELECT_FUNCTION(type, lane_type, bool_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Select) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK_EQ(3, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(bool_type, mask, 0); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 1); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 2); \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = mask->get_lane(i) ? a->get_lane(i) : b->get_lane(i); \
- } \
- Handle<type> result = isolate->factory()->New##type(lanes); \
- return *result; \
- }
-
-SIMD_SELECT_TYPES(SIMD_SELECT_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Signed / unsigned functions.
-
-#define SIMD_SIGNED_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, 4) \
- FUNCTION(Int32x4, int32_t, 4) \
- FUNCTION(Int16x8, int16_t, 8) \
- FUNCTION(Int8x16, int8_t, 16)
-
-#define SIMD_NEG_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Neg) { \
- HandleScope scope(isolate); \
- SIMD_UNARY_OP(type, lane_type, lane_count, -, result); \
- return *result; \
- }
-
-SIMD_SIGNED_TYPES(SIMD_NEG_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Casting functions.
-
-#define SIMD_FROM_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, 4, Int32x4, int32_t) \
- FUNCTION(Float32x4, float, 4, Uint32x4, uint32_t) \
- FUNCTION(Int32x4, int32_t, 4, Float32x4, float) \
- FUNCTION(Int32x4, int32_t, 4, Uint32x4, uint32_t) \
- FUNCTION(Uint32x4, uint32_t, 4, Float32x4, float) \
- FUNCTION(Uint32x4, uint32_t, 4, Int32x4, int32_t) \
- FUNCTION(Int16x8, int16_t, 8, Uint16x8, uint16_t) \
- FUNCTION(Uint16x8, uint16_t, 8, Int16x8, int16_t) \
- FUNCTION(Int8x16, int8_t, 16, Uint8x16, uint8_t) \
- FUNCTION(Uint8x16, uint8_t, 16, Int8x16, int8_t)
-
-#define SIMD_FROM_FUNCTION(type, lane_type, lane_count, from_type, from_ctype) \
- RUNTIME_FUNCTION(Runtime_##type##From##from_type) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK_EQ(1, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(from_type, a, 0); \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- from_ctype a_value = a->get_lane(i); \
- if (a_value != a_value || !CanCast<lane_type>(a_value)) { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, NewRangeError(MessageTemplate::kInvalidSimdLaneValue)); \
- } \
- lanes[i] = static_cast<lane_type>(a_value); \
- } \
- Handle<type> result = isolate->factory()->New##type(lanes); \
- return *result; \
- }
-
-SIMD_FROM_TYPES(SIMD_FROM_FUNCTION)
-
-#define SIMD_FROM_BITS_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, 4, Int32x4) \
- FUNCTION(Float32x4, float, 4, Uint32x4) \
- FUNCTION(Float32x4, float, 4, Int16x8) \
- FUNCTION(Float32x4, float, 4, Uint16x8) \
- FUNCTION(Float32x4, float, 4, Int8x16) \
- FUNCTION(Float32x4, float, 4, Uint8x16) \
- FUNCTION(Int32x4, int32_t, 4, Float32x4) \
- FUNCTION(Int32x4, int32_t, 4, Uint32x4) \
- FUNCTION(Int32x4, int32_t, 4, Int16x8) \
- FUNCTION(Int32x4, int32_t, 4, Uint16x8) \
- FUNCTION(Int32x4, int32_t, 4, Int8x16) \
- FUNCTION(Int32x4, int32_t, 4, Uint8x16) \
- FUNCTION(Uint32x4, uint32_t, 4, Float32x4) \
- FUNCTION(Uint32x4, uint32_t, 4, Int32x4) \
- FUNCTION(Uint32x4, uint32_t, 4, Int16x8) \
- FUNCTION(Uint32x4, uint32_t, 4, Uint16x8) \
- FUNCTION(Uint32x4, uint32_t, 4, Int8x16) \
- FUNCTION(Uint32x4, uint32_t, 4, Uint8x16) \
- FUNCTION(Int16x8, int16_t, 8, Float32x4) \
- FUNCTION(Int16x8, int16_t, 8, Int32x4) \
- FUNCTION(Int16x8, int16_t, 8, Uint32x4) \
- FUNCTION(Int16x8, int16_t, 8, Uint16x8) \
- FUNCTION(Int16x8, int16_t, 8, Int8x16) \
- FUNCTION(Int16x8, int16_t, 8, Uint8x16) \
- FUNCTION(Uint16x8, uint16_t, 8, Float32x4) \
- FUNCTION(Uint16x8, uint16_t, 8, Int32x4) \
- FUNCTION(Uint16x8, uint16_t, 8, Uint32x4) \
- FUNCTION(Uint16x8, uint16_t, 8, Int16x8) \
- FUNCTION(Uint16x8, uint16_t, 8, Int8x16) \
- FUNCTION(Uint16x8, uint16_t, 8, Uint8x16) \
- FUNCTION(Int8x16, int8_t, 16, Float32x4) \
- FUNCTION(Int8x16, int8_t, 16, Int32x4) \
- FUNCTION(Int8x16, int8_t, 16, Uint32x4) \
- FUNCTION(Int8x16, int8_t, 16, Int16x8) \
- FUNCTION(Int8x16, int8_t, 16, Uint16x8) \
- FUNCTION(Int8x16, int8_t, 16, Uint8x16) \
- FUNCTION(Uint8x16, uint8_t, 16, Float32x4) \
- FUNCTION(Uint8x16, uint8_t, 16, Int32x4) \
- FUNCTION(Uint8x16, uint8_t, 16, Uint32x4) \
- FUNCTION(Uint8x16, uint8_t, 16, Int16x8) \
- FUNCTION(Uint8x16, uint8_t, 16, Uint16x8) \
- FUNCTION(Uint8x16, uint8_t, 16, Int8x16)
-
-#define SIMD_FROM_BITS_FUNCTION(type, lane_type, lane_count, from_type) \
- RUNTIME_FUNCTION(Runtime_##type##From##from_type##Bits) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK_EQ(1, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(from_type, a, 0); \
- lane_type lanes[kLaneCount]; \
- a->CopyBits(lanes); \
- Handle<type> result = isolate->factory()->New##type(lanes); \
- return *result; \
- }
-
-SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
-
-
-//-------------------------------------------------------------------
-
-// Load and Store functions.
-
-#define SIMD_LOADN_STOREN_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, 4) \
- FUNCTION(Int32x4, int32_t, 4) \
- FUNCTION(Uint32x4, uint32_t, 4)
-
-#define SIMD_COERCE_INDEX(name, i) \
- Handle<Object> length_object, number_object; \
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, length_object, \
- Object::ToLength(isolate, args.at(i))); \
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_object, \
- Object::ToNumber(args.at(i))); \
- if (number_object->Number() != length_object->Number()) { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex)); \
- } \
- int32_t name = number_object->Number();
-
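SIMD_COERCE_INDEX accepts an index only when ToLength and ToNumber agree on it, i.e. the value is already a non-negative integral number in the length range. A standalone sketch of that rule (IsValidSimdIndex is an illustrative name, not V8 API):

#include <cmath>

// Sketch of the rule encoded by SIMD_COERCE_INDEX: the index passes only if
// ToLength(i) and ToNumber(i) give the same value.
bool IsValidSimdIndex(double number) {
  // ToLength: truncate toward zero, then clamp to [0, 2^53 - 1].
  double length = std::isnan(number) ? 0 : std::trunc(number);
  if (length < 0) length = 0;
  if (length > 9007199254740991.0) length = 9007199254740991.0;
  return length == number;  // Rejects fractions, negatives, NaN, infinities.
}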
-// Common Load and Store Functions
-
-#define SIMD_LOAD(type, lane_type, lane_count, count, result) \
- static const int kLaneCount = lane_count; \
- DCHECK_EQ(2, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
- SIMD_COERCE_INDEX(index, 1); \
- size_t bpe = tarray->element_size(); \
- uint32_t bytes = count * sizeof(lane_type); \
- size_t byte_length = NumberToSize(tarray->byte_length()); \
- if (index < 0 || index * bpe + bytes > byte_length) { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex)); \
- } \
- size_t tarray_offset = NumberToSize(tarray->byte_offset()); \
- uint8_t* tarray_base = \
- static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) + \
- tarray_offset; \
- lane_type lanes[kLaneCount] = {0}; \
- memcpy(lanes, tarray_base + index * bpe, bytes); \
- Handle<type> result = isolate->factory()->New##type(lanes);
-
-#define SIMD_STORE(type, lane_type, lane_count, count, a) \
- static const int kLaneCount = lane_count; \
- DCHECK_EQ(3, args.length()); \
- CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 2); \
- SIMD_COERCE_INDEX(index, 1); \
- size_t bpe = tarray->element_size(); \
- uint32_t bytes = count * sizeof(lane_type); \
- size_t byte_length = NumberToSize(tarray->byte_length()); \
- if (index < 0 || byte_length < index * bpe + bytes) { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex)); \
- } \
- size_t tarray_offset = NumberToSize(tarray->byte_offset()); \
- uint8_t* tarray_base = \
- static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) + \
- tarray_offset; \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = a->get_lane(i); \
- } \
- memcpy(tarray_base + index * bpe, lanes, bytes);
-
-#define SIMD_LOAD_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Load) { \
- HandleScope scope(isolate); \
- SIMD_LOAD(type, lane_type, lane_count, lane_count, result); \
- return *result; \
- }
-
-
-#define SIMD_LOAD1_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Load1) { \
- HandleScope scope(isolate); \
- SIMD_LOAD(type, lane_type, lane_count, 1, result); \
- return *result; \
- }
-
-
-#define SIMD_LOAD2_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Load2) { \
- HandleScope scope(isolate); \
- SIMD_LOAD(type, lane_type, lane_count, 2, result); \
- return *result; \
- }
-
-
-#define SIMD_LOAD3_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Load3) { \
- HandleScope scope(isolate); \
- SIMD_LOAD(type, lane_type, lane_count, 3, result); \
- return *result; \
- }
-
-
-#define SIMD_STORE_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Store) { \
- HandleScope scope(isolate); \
- SIMD_STORE(type, lane_type, lane_count, lane_count, a); \
- return *a; \
- }
-
-
-#define SIMD_STORE1_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Store1) { \
- HandleScope scope(isolate); \
- SIMD_STORE(type, lane_type, lane_count, 1, a); \
- return *a; \
- }
-
-
-#define SIMD_STORE2_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Store2) { \
- HandleScope scope(isolate); \
- SIMD_STORE(type, lane_type, lane_count, 2, a); \
- return *a; \
- }
-
-
-#define SIMD_STORE3_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Store3) { \
- HandleScope scope(isolate); \
- SIMD_STORE(type, lane_type, lane_count, 3, a); \
- return *a; \
- }
-
-
-SIMD_NUMERIC_TYPES(SIMD_LOAD_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_LOAD1_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_LOAD2_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_LOAD3_FUNCTION)
-SIMD_NUMERIC_TYPES(SIMD_STORE_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_STORE1_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_STORE2_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_STORE3_FUNCTION)
-
-//-------------------------------------------------------------------
-
-} // namespace internal
-} // namespace v8
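For reference, the lanewise select removed above chooses each result lane from a or b according to the corresponding mask lane. A minimal sketch of that semantics with plain arrays in place of V8's SIMD handles (select_lanes is an illustrative name):

#include <array>
#include <cstddef>

template <typename T, std::size_t N>
std::array<T, N> select_lanes(const std::array<bool, N>& mask,
                              const std::array<T, N>& a,
                              const std::array<T, N>& b) {
  std::array<T, N> result;
  for (std::size_t i = 0; i < N; i++) {
    // Lane i comes from a where the mask lane is true, else from b.
    result[i] = mask[i] ? a[i] : b[i];
  }
  return result;
}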
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 31d9f1fc6e..3a435913e3 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -5,14 +5,54 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/string-builder.h"
-#include "src/string-case.h"
#include "src/string-search.h"
namespace v8 {
namespace internal {
+RUNTIME_FUNCTION(Runtime_GetSubstitution) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, matched, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+ CONVERT_SMI_ARG_CHECKED(position, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, replacement, 3);
+
+ // A simple match without captures.
+ class SimpleMatch : public String::Match {
+ public:
+ SimpleMatch(Handle<String> match, Handle<String> prefix,
+ Handle<String> suffix)
+ : match_(match), prefix_(prefix), suffix_(suffix) {}
+
+ Handle<String> GetMatch() override { return match_; }
+ MaybeHandle<String> GetCapture(int i, bool* capture_exists) override {
+ *capture_exists = false;
+ return match_; // Return arbitrary string handle.
+ }
+ Handle<String> GetPrefix() override { return prefix_; }
+ Handle<String> GetSuffix() override { return suffix_; }
+ int CaptureCount() override { return 0; }
+
+ private:
+ Handle<String> match_, prefix_, suffix_;
+ };
+
+ Handle<String> prefix =
+ isolate->factory()->NewSubString(subject, 0, position);
+ Handle<String> suffix = isolate->factory()->NewSubString(
+ subject, position + matched->length(), subject->length());
+ SimpleMatch match(matched, prefix, suffix);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, String::GetSubstitution(isolate, &match, replacement));
+}
+
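Runtime_GetSubstitution feeds String::GetSubstitution a capture-free match split into match, prefix, and suffix. A plain-std::string sketch of that split (SimpleMatchParts and SplitAroundMatch are made-up names, not part of the patch):

#include <cstddef>
#include <string>

struct SimpleMatchParts {
  std::string match, prefix, suffix;
};

SimpleMatchParts SplitAroundMatch(const std::string& subject,
                                  const std::string& matched,
                                  std::size_t position) {
  return {matched,
          subject.substr(0, position),                 // text before the match
          subject.substr(position + matched.size())};  // text after the match
}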
// This may return an empty MaybeHandle if an exception is thrown or
// we abort due to reaching the recursion limit.
MaybeHandle<String> StringReplaceOneCharWithString(
@@ -263,6 +303,9 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
if (length == -1) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
+ if (length == 0) {
+ return isolate->heap()->empty_string();
+ }
if (one_byte) {
Handle<SeqOneByteString> answer;
@@ -589,182 +632,6 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
}
-static inline bool ToUpperOverflows(uc32 character) {
- // y with umlauts and the micro sign are the only characters that stop
- // fitting into one-byte when converting to uppercase.
- static const uc32 yuml_code = 0xff;
- static const uc32 micro_code = 0xb5;
- return (character == yuml_code || character == micro_code);
-}
-
-
-template <class Converter>
-MUST_USE_RESULT static Object* ConvertCaseHelper(
- Isolate* isolate, String* string, SeqString* result, int result_length,
- unibrow::Mapping<Converter, 128>* mapping) {
- DisallowHeapAllocation no_gc;
- // We try this twice, once with the assumption that the result is no longer
- // than the input and, if that assumption breaks, again with the exact
- // length. This may not be pretty, but it is nicer than what was here before
- // and I hereby claim my vaffel-is.
- //
- // NOTE: This assumes that the upper/lower case of an ASCII
- // character is also ASCII. This is currently the case, but it
- // might break in the future if we implement more context and locale
- // dependent upper/lower conversions.
- bool has_changed_character = false;
-
- // Convert all characters to upper case, assuming that they will fit
- // in the buffer
- StringCharacterStream stream(string);
- unibrow::uchar chars[Converter::kMaxWidth];
- // We can assume that the string is not empty
- uc32 current = stream.GetNext();
- bool ignore_overflow = Converter::kIsToLower || result->IsSeqTwoByteString();
- for (int i = 0; i < result_length;) {
- bool has_next = stream.HasMore();
- uc32 next = has_next ? stream.GetNext() : 0;
- int char_length = mapping->get(current, next, chars);
- if (char_length == 0) {
- // The case conversion of this character is the character itself.
- result->Set(i, current);
- i++;
- } else if (char_length == 1 &&
- (ignore_overflow || !ToUpperOverflows(current))) {
- // Common case: converting the letter resulted in one character.
- DCHECK(static_cast<uc32>(chars[0]) != current);
- result->Set(i, chars[0]);
- has_changed_character = true;
- i++;
- } else if (result_length == string->length()) {
- bool overflows = ToUpperOverflows(current);
- // We've assumed that the result would be as long as the
- // input but here is a character that converts to several
- // characters. No matter, we calculate the exact length
- // of the result and try the whole thing again.
- //
- // Note that this leaves room for optimization. We could just
- // memcpy what we already have to the result string. Also,
-    // the result string is the last object allocated, so we could
- // "realloc" it and probably, in the vast majority of cases,
- // extend the existing string to be able to hold the full
- // result.
- int next_length = 0;
- if (has_next) {
- next_length = mapping->get(next, 0, chars);
- if (next_length == 0) next_length = 1;
- }
- int current_length = i + char_length + next_length;
- while (stream.HasMore()) {
- current = stream.GetNext();
- overflows |= ToUpperOverflows(current);
- // NOTE: we use 0 as the next character here because, while
- // the next character may affect what a character converts to,
-        // it does not in any case affect the length of what it converts
- // to.
- int char_length = mapping->get(current, 0, chars);
- if (char_length == 0) char_length = 1;
- current_length += char_length;
- if (current_length > String::kMaxLength) {
- AllowHeapAllocation allocate_error_and_return;
- THROW_NEW_ERROR_RETURN_FAILURE(isolate,
- NewInvalidStringLengthError());
- }
- }
- // Try again with the real length. Return signed if we need
-      // to allocate a two-byte string for the to-uppercase conversion.
- return (overflows && !ignore_overflow) ? Smi::FromInt(-current_length)
- : Smi::FromInt(current_length);
- } else {
- for (int j = 0; j < char_length; j++) {
- result->Set(i, chars[j]);
- i++;
- }
- has_changed_character = true;
- }
- current = next;
- }
- if (has_changed_character) {
- return result;
- } else {
- // If we didn't actually change anything in doing the conversion
-    // we simply return the input and let the converted string
- // become garbage; there is no reason to keep two identical strings
- // alive.
- return string;
- }
-}
-
-template <class Converter>
-MUST_USE_RESULT static Object* ConvertCase(
- Handle<String> s, Isolate* isolate,
- unibrow::Mapping<Converter, 128>* mapping) {
- s = String::Flatten(s);
- int length = s->length();
- // Assume that the string is not empty; we need this assumption later
- if (length == 0) return *s;
-
- // Simpler handling of ASCII strings.
- //
- // NOTE: This assumes that the upper/lower case of an ASCII
- // character is also ASCII. This is currently the case, but it
- // might break in the future if we implement more context and locale
- // dependent upper/lower conversions.
- if (s->IsOneByteRepresentationUnderneath()) {
- // Same length as input.
- Handle<SeqOneByteString> result =
- isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
- DisallowHeapAllocation no_gc;
- String::FlatContent flat_content = s->GetFlatContent();
- DCHECK(flat_content.IsFlat());
- bool has_changed_character = false;
- int index_to_first_unprocessed = FastAsciiConvert<Converter::kIsToLower>(
- reinterpret_cast<char*>(result->GetChars()),
- reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
- length, &has_changed_character);
- // If not ASCII, we discard the result and take the 2 byte path.
- if (index_to_first_unprocessed == length)
- return has_changed_character ? *result : *s;
- }
-
- Handle<SeqString> result; // Same length as input.
- if (s->IsOneByteRepresentation()) {
- result = isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
- } else {
- result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked();
- }
-
- Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
- if (answer->IsException(isolate) || answer->IsString()) return answer;
-
- DCHECK(answer->IsSmi());
- length = Smi::cast(answer)->value();
- if (s->IsOneByteRepresentation() && length > 0) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawOneByteString(length));
- } else {
- if (length < 0) length = -length;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawTwoByteString(length));
- }
- return ConvertCaseHelper(isolate, *s, *result, length, mapping);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringToLowerCase) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- return ConvertCase(s, isolate, isolate->runtime_state()->to_lower_mapping());
-}
-
-RUNTIME_FUNCTION(Runtime_StringToUpperCase) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- return ConvertCase(s, isolate, isolate->runtime_state()->to_upper_mapping());
-}
-
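The removed ConvertCaseHelper/ConvertCase pair used a two-pass strategy: convert assuming the result keeps the input's length, and only when a character expands recompute the exact length and convert again. A toy sketch of that control flow, with a made-up MapChar standing in for the unibrow mapping:

#include <cstddef>
#include <string>

// Toy mapping: ASCII upper-casing plus U+00DF (sharp s) expanding to "SS".
static int MapChar(char16_t c, char16_t* out) {
  if (c == 0x00DF) { out[0] = u'S'; out[1] = u'S'; return 2; }
  if (c >= u'a' && c <= u'z') {
    out[0] = static_cast<char16_t>(c - 32);
    return 1;
  }
  return 0;  // The character maps to itself.
}

std::u16string ToUpperTwoPass(const std::u16string& input) {
  std::u16string result(input.size(), u'\0');
  std::size_t i = 0;
  for (char16_t c : input) {
    char16_t buf[2];
    int n = MapChar(c, buf);
    if (n == 0) { result[i++] = c; continue; }
    if (n == 1) { result[i++] = buf[0]; continue; }
    // A character expanded, so the same-length assumption broke: redo the
    // whole conversion at the exact length, as ConvertCaseHelper signals
    // via its Smi length result.
    std::u16string exact;
    for (char16_t d : input) {
      int m = MapChar(d, buf);
      if (m == 0) exact.push_back(d);
      else exact.append(buf, buf + m);
    }
    return exact;
  }
  return result;
}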
RUNTIME_FUNCTION(Runtime_StringLessThan) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index bea7245c35..c6234fcd85 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -7,6 +7,7 @@
#include <memory>
#include "src/arguments.h"
+#include "src/assembler-inl.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
#include "src/deoptimizer.h"
@@ -275,21 +276,28 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
return isolate->heap()->undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
DCHECK(args.length() == 1 || args.length() == 2);
+ int status = 0;
if (!isolate->use_crankshaft()) {
- return Smi::FromInt(4); // 4 == "never".
+ status |= static_cast<int>(OptimizationStatus::kNeverOptimize);
+ }
+ if (FLAG_always_opt || FLAG_prepare_always_opt) {
+ status |= static_cast<int>(OptimizationStatus::kAlwaysOptimize);
+ }
+ if (FLAG_deopt_every_n_times) {
+ status |= static_cast<int>(OptimizationStatus::kMaybeDeopted);
}
// This function is used by fuzzers to get coverage for optimizations
  // in the compiler. Ignore calls on non-function objects to avoid runtime errors.
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
if (!function_object->IsJSFunction()) {
- return isolate->heap()->undefined_value();
+ return Smi::FromInt(status);
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+ status |= static_cast<int>(OptimizationStatus::kIsFunction);
bool sync_with_compiler_thread = true;
if (args.length() == 2) {
@@ -308,22 +316,16 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
base::OS::Sleep(base::TimeDelta::FromMilliseconds(50));
}
}
- if (FLAG_always_opt || FLAG_prepare_always_opt) {
- // With --always-opt, optimization status expectations might not
- // match up, so just return a sentinel.
- return Smi::FromInt(3); // 3 == "always".
- }
- if (FLAG_deopt_every_n_times) {
- return Smi::FromInt(6); // 6 == "maybe deopted".
- }
- if (function->IsOptimized() && function->code()->is_turbofanned()) {
- return Smi::FromInt(7); // 7 == "TurboFan compiler".
+ if (function->IsOptimized()) {
+ status |= static_cast<int>(OptimizationStatus::kOptimized);
+ if (function->code()->is_turbofanned()) {
+ status |= static_cast<int>(OptimizationStatus::kTurboFanned);
+ }
}
if (function->IsInterpreted()) {
- return Smi::FromInt(8); // 8 == "Interpreted".
+ status |= static_cast<int>(OptimizationStatus::kInterpreted);
}
- return function->IsOptimized() ? Smi::FromInt(1) // 1 == "yes".
- : Smi::FromInt(2); // 2 == "no".
+ return Smi::FromInt(status);
}
@@ -392,7 +394,7 @@ RUNTIME_FUNCTION(Runtime_GetCallable) {
return *Utils::OpenHandle(*instance);
}
-RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
+RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index d5e394c345..4ca7bbb009 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -367,6 +367,67 @@ RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
}
}
+namespace {
+
+template <typename T>
+bool CompareNum(T x, T y) {
+ if (x < y) {
+ return true;
+ } else if (x > y) {
+ return false;
+ } else if (!std::is_integral<T>::value) {
+ double _x = x, _y = y;
+ if (x == 0 && x == y) {
+ /* -0.0 is less than +0.0 */
+ return std::signbit(_x) && !std::signbit(_y);
+ } else if (!std::isnan(_x) && std::isnan(_y)) {
+ /* number is less than NaN */
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, target_obj, 0);
+
+ Handle<JSTypedArray> array;
+ const char* method = "%TypedArray%.prototype.sort";
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, array, JSTypedArray::Validate(isolate, target_obj, method));
+
+ // This line can be removed when JSTypedArray::Validate throws
+  // if array.[[ViewedArrayBuffer]] is neutered (v8:4648).
+ if (V8_UNLIKELY(array->WasNeutered())) return *array;
+
+ size_t length = array->length_value();
+ if (length <= 1) return *array;
+
+ Handle<FixedTypedArrayBase> elements(
+ FixedTypedArrayBase::cast(array->elements()));
+ switch (array->type()) {
+#define TYPED_ARRAY_SORT(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: { \
+ ctype* data = static_cast<ctype*>(elements->DataPtr()); \
+ if (kExternal##Type##Array == kExternalFloat64Array || \
+ kExternal##Type##Array == kExternalFloat32Array) \
+ std::sort(data, data + length, CompareNum<ctype>); \
+ else \
+ std::sort(data, data + length); \
+ break; \
+ }
+
+ TYPED_ARRAYS(TYPED_ARRAY_SORT)
+#undef TYPED_ARRAY_SORT
+ }
+
+ return *array;
+}
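CompareNum gives std::sort a total order over floating-point lanes: -0.0 sorts before +0.0 and NaNs sort after all numbers. A standalone check of that ordering (CompareDouble replicates the floating-point branch outside V8):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

static bool CompareDouble(double x, double y) {
  if (x < y) return true;
  if (x > y) return false;
  if (x == 0 && x == y) return std::signbit(x) && !std::signbit(y);  // -0 < +0
  return !std::isnan(x) && std::isnan(y);  // any number sorts before NaN
}

int main() {
  std::vector<double> v = {std::nan(""), 1.0, -0.0, 0.0, -1.0};
  std::sort(v.begin(), v.end(), CompareDouble);
  for (double d : v) std::printf("%g ", d);  // prints: -1 -0 0 1 nan
  std::printf("\n");
  return 0;
}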
RUNTIME_FUNCTION(Runtime_TypedArrayMaxSizeInHeap) {
DCHECK_EQ(0, args.length());
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 3ae5b92da1..9f125c1345 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -21,7 +21,7 @@ namespace v8 {
namespace internal {
namespace {
-Handle<WasmInstanceObject> GetWasmInstanceOnStackTop(Isolate* isolate) {
+WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
DisallowHeapAllocation no_allocation;
const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
Address pc =
@@ -30,7 +30,12 @@ Handle<WasmInstanceObject> GetWasmInstanceOnStackTop(Isolate* isolate) {
DCHECK_EQ(Code::WASM_FUNCTION, code->kind());
WasmInstanceObject* owning_instance = wasm::GetOwningWasmInstance(code);
CHECK_NOT_NULL(owning_instance);
- return handle(owning_instance, isolate);
+ return owning_instance;
+}
+Context* GetWasmContextOnStackTop(Isolate* isolate) {
+ return GetWasmInstanceOnStackTop(isolate)
+ ->compiled_module()
+ ->ptr_to_native_context();
}
} // namespace
@@ -38,7 +43,8 @@ RUNTIME_FUNCTION(Runtime_WasmMemorySize) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- Handle<WasmInstanceObject> instance = GetWasmInstanceOnStackTop(isolate);
+ Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate),
+ isolate);
return *isolate->factory()->NewNumberFromInt(
wasm::GetInstanceMemorySize(isolate, instance));
}
@@ -47,7 +53,13 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_UINT32_ARG_CHECKED(delta_pages, 0);
- Handle<WasmInstanceObject> instance = GetWasmInstanceOnStackTop(isolate);
+ Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate),
+ isolate);
+
+ // Set the current isolate's context.
+ DCHECK_NULL(isolate->context());
+ isolate->set_context(instance->compiled_module()->ptr_to_native_context());
+
return *isolate->factory()->NewNumberFromInt(
wasm::GrowMemory(isolate, instance, delta_pages));
}
@@ -55,6 +67,8 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
Object* ThrowRuntimeError(Isolate* isolate, int message_id, int byte_offset,
bool patch_source_position) {
HandleScope scope(isolate);
+ DCHECK_NULL(isolate->context());
+ isolate->set_context(GetWasmContextOnStackTop(isolate));
Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
static_cast<MessageTemplate::Template>(message_id));
@@ -108,6 +122,12 @@ Object* ThrowRuntimeError(Isolate* isolate, int message_id, int byte_offset,
return isolate->Throw(*error_obj);
}
+RUNTIME_FUNCTION(Runtime_ThrowWasmErrorFromTrapIf) {
+ DCHECK_EQ(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ return ThrowRuntimeError(isolate, message_id, 0, false);
+}
+
RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(message_id, 0);
@@ -115,14 +135,6 @@ RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
return ThrowRuntimeError(isolate, message_id, byte_offset, true);
}
-#define DECLARE_ENUM(name) \
- RUNTIME_FUNCTION(Runtime_ThrowWasm##name) { \
- int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
- return ThrowRuntimeError(isolate, message_id, 0, false); \
- }
-FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
-#undef DECLARE_ENUM
-
RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -138,6 +150,10 @@ RUNTIME_FUNCTION(Runtime_WasmThrow) {
const int32_t thrown_value = (upper << 16) | lower;
+ // Set the current isolate's context.
+ DCHECK_NULL(isolate->context());
+ isolate->set_context(GetWasmContextOnStackTop(isolate));
+
return isolate->Throw(*isolate->factory()->NewNumberFromInt(thrown_value));
}
@@ -153,12 +169,14 @@ RUNTIME_FUNCTION(Runtime_WasmGetCaughtExceptionValue) {
}
RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSObject, instance_obj, 0);
CONVERT_NUMBER_CHECKED(int32_t, func_index, Int32, args[1]);
CONVERT_ARG_HANDLE_CHECKED(Object, arg_buffer_obj, 2);
CHECK(WasmInstanceObject::IsWasmInstanceObject(*instance_obj));
+ Handle<WasmInstanceObject> instance =
+ Handle<WasmInstanceObject>::cast(instance_obj);
// The arg buffer is the raw pointer to the caller's stack. It looks like a
  // Smi (lowest bit not set, as checked by IsSmi), but is not a valid Smi. We just
@@ -167,13 +185,28 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
CHECK(arg_buffer_obj->IsSmi());
uint8_t* arg_buffer = reinterpret_cast<uint8_t*>(*arg_buffer_obj);
- Handle<WasmInstanceObject> instance =
- Handle<WasmInstanceObject>::cast(instance_obj);
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- WasmDebugInfo::RunInterpreter(debug_info, func_index, arg_buffer);
+ // Set the current isolate's context.
+ DCHECK_NULL(isolate->context());
+ isolate->set_context(instance->compiled_module()->ptr_to_native_context());
+
+ instance->debug_info()->RunInterpreter(func_index, arg_buffer);
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+
+ // Set the current isolate's context.
+ DCHECK_NULL(isolate->context());
+ isolate->set_context(GetWasmContextOnStackTop(isolate));
+
+ // Check if this is a real stack overflow.
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) return isolate->StackOverflow();
+
+ return isolate->stack_guard()->HandleInterrupts();
+}
+
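Each wasm runtime entry above installs the instance's native context before anything that can allocate or throw, since wasm code enters the runtime with no context set. A hypothetical RAII-style condensation of that recurring pattern (WasmContextScope is not part of the patch):

// Hypothetical sketch only, relying on the Isolate/Context API used above.
class WasmContextScope {
 public:
  WasmContextScope(Isolate* isolate, Context* native_context)
      : isolate_(isolate) {
    DCHECK_NULL(isolate_->context());  // Entered from wasm: no context set.
    isolate_->set_context(native_context);
  }

 private:
  Isolate* isolate_;
};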
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 7eadbe2c09..6c5a039d67 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -9,8 +9,8 @@
#include "src/allocation.h"
#include "src/base/platform/time.h"
+#include "src/elements-kind.h"
#include "src/globals.h"
-#include "src/objects.h"
#include "src/unicode.h"
#include "src/zone/zone.h"
@@ -56,7 +56,8 @@ namespace internal {
F(ArraySpeciesConstructor, 1, 1) \
F(ArrayIncludes_Slow, 3, 1) \
F(ArrayIndexOf, 3, 1) \
- F(SpreadIterablePrepare, 1, 1)
+ F(SpreadIterablePrepare, 1, 1) \
+ F(SpreadIterableFixed, 1, 1)
#define FOR_EACH_INTRINSIC_ATOMICS(F) \
F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
@@ -72,7 +73,8 @@ namespace internal {
F(AtomicsIsLockFree, 1, 1) \
F(AtomicsWait, 4, 1) \
F(AtomicsWake, 3, 1) \
- F(AtomicsNumWaitersForTesting, 2, 1)
+ F(AtomicsNumWaitersForTesting, 2, 1) \
+ F(SetAllowAtomicsWait, 1, 1)
#define FOR_EACH_INTRINSIC_CLASSES(F) \
F(ThrowUnsupportedSuperError, 0, 1) \
@@ -90,8 +92,7 @@ namespace internal {
F(StoreToSuper_Sloppy, 4, 1) \
F(StoreKeyedToSuper_Strict, 4, 1) \
F(StoreKeyedToSuper_Sloppy, 4, 1) \
- F(GetSuperConstructor, 1, 1) \
- F(NewWithSpread, -1, 1)
+ F(GetSuperConstructor, 1, 1)
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
F(StringGetRawHashField, 1, 1) \
@@ -170,9 +171,8 @@ namespace internal {
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
F(PrepareStep, 2, 1) \
- F(PrepareStepFrame, 0, 1) \
F(ClearStepping, 0, 1) \
- F(DebugEvaluate, 4, 1) \
+ F(DebugEvaluate, 5, 1) \
F(DebugEvaluateGlobal, 2, 1) \
F(DebugGetLoadedScripts, 0, 1) \
F(DebugReferencedBy, 3, 1) \
@@ -181,7 +181,6 @@ namespace internal {
F(DebugSetScriptSource, 2, 1) \
F(FunctionGetInferredName, 1, 1) \
F(FunctionGetDebugName, 1, 1) \
- F(ExecuteInDebugContext, 1, 1) \
F(GetDebugContext, 0, 1) \
F(CollectGarbage, 1, 1) \
F(GetHeapUsage, 0, 1) \
@@ -200,19 +199,19 @@ namespace internal {
F(DebugPushPromise, 1, 1) \
F(DebugPopPromise, 0, 1) \
F(DebugPromiseReject, 2, 1) \
- F(DebugNextAsyncTaskId, 1, 1) \
F(DebugAsyncEventEnqueueRecurring, 2, 1) \
F(DebugAsyncFunctionPromiseCreated, 1, 1) \
F(DebugIsActive, 0, 1) \
- F(DebugBreakInOptimizedCode, 0, 1)
+ F(DebugBreakInOptimizedCode, 0, 1) \
+ F(DebugCollectCoverage, 0, 1) \
+ F(DebugTogglePreciseCoverage, 1, 1)
#define FOR_EACH_INTRINSIC_ERROR(F) F(ErrorToString, 1, 1)
#define FOR_EACH_INTRINSIC_FORIN(F) \
F(ForInEnumerate, 1, 1) \
F(ForInFilter, 2, 1) \
- F(ForInHasProperty, 2, 1) \
- F(ForInNext, 4, 1)
+ F(ForInHasProperty, 2, 1)
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
F(InterpreterNewClosure, 4, 1) \
@@ -290,12 +289,9 @@ namespace internal {
F(AllocateSeqOneByteString, 1, 1) \
F(AllocateSeqTwoByteString, 1, 1) \
F(CheckIsBootstrapping, 0, 1) \
+ F(CreateAsyncFromSyncIterator, 1, 1) \
F(CreateListFromArrayLike, 1, 1) \
- F(EnqueueMicrotask, 1, 1) \
- F(EnqueuePromiseReactionJob, 3, 1) \
- F(EnqueuePromiseResolveThenableJob, 1, 1) \
F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
- F(ExportExperimentalFromRuntime, 1, 1) \
F(ExportFromRuntime, 1, 1) \
F(IncrementUseCounter, 1, 1) \
F(InstallToContext, 1, 1) \
@@ -305,17 +301,6 @@ namespace internal {
F(NewSyntaxError, 2, 1) \
F(NewTypeError, 2, 1) \
F(OrdinaryHasInstance, 2, 1) \
- F(ReportPromiseReject, 2, 1) \
- F(PromiseHookInit, 2, 1) \
- F(PromiseHookResolve, 1, 1) \
- F(PromiseHookBefore, 1, 1) \
- F(PromiseHookAfter, 1, 1) \
- F(PromiseMarkAsHandled, 1, 1) \
- F(PromiseMarkHandledHint, 1, 1) \
- F(PromiseRejectEventFromStack, 2, 1) \
- F(PromiseRevokeReject, 1, 1) \
- F(PromiseResult, 1, 1) \
- F(PromiseStatus, 1, 1) \
F(PromoteScheduledException, 0, 1) \
F(ReThrow, 1, 1) \
F(RunMicrotasks, 0, 1) \
@@ -334,9 +319,13 @@ namespace internal {
F(ThrowInvalidStringLength, 0, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
F(ThrowSymbolIteratorInvalid, 0, 1) \
+ F(ThrowNonCallableInInstanceOfCheck, 0, 1) \
+ F(ThrowNonObjectInInstanceOfCheck, 0, 1) \
+ F(ThrowNotConstructor, 1, 1) \
F(ThrowNotGeneric, 1, 1) \
F(ThrowReferenceError, 1, 1) \
F(ThrowStackOverflow, 0, 1) \
+ F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
F(ThrowTypeError, -1 /* >= 1 */, 1) \
F(ThrowUndefinedOrNullToObject, 1, 1) \
F(Typeof, 1, 1) \
@@ -366,6 +355,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 0, 1)
#define FOR_EACH_INTRINSIC_MODULE(F) \
+ F(DynamicImportCall, 1, 1) \
F(GetModuleNamespace, 1, 1) \
F(LoadModuleVariable, 1, 1) \
F(StoreModuleVariable, 2, 1)
@@ -385,56 +375,57 @@ namespace internal {
F(GetHoleNaNUpper, 0, 1) \
F(GetHoleNaNLower, 0, 1)
-#define FOR_EACH_INTRINSIC_OBJECT(F) \
- F(GetPrototype, 1, 1) \
- F(ObjectHasOwnProperty, 2, 1) \
- F(ObjectCreate, 2, 1) \
- F(InternalSetPrototype, 2, 1) \
- F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- F(GetProperty, 2, 1) \
- F(KeyedGetProperty, 2, 1) \
- F(AddNamedProperty, 4, 1) \
- F(SetProperty, 4, 1) \
- F(AddElement, 3, 1) \
- F(AppendElement, 2, 1) \
- F(DeleteProperty_Sloppy, 2, 1) \
- F(DeleteProperty_Strict, 2, 1) \
- F(HasProperty, 2, 1) \
- F(GetOwnPropertyKeys, 2, 1) \
- F(GetInterceptorInfo, 1, 1) \
- F(ToFastProperties, 1, 1) \
- F(AllocateHeapNumber, 0, 1) \
- F(NewObject, 2, 1) \
- F(FinalizeInstanceSize, 1, 1) \
- F(LoadMutableDouble, 2, 1) \
- F(TryMigrateInstance, 1, 1) \
- F(IsJSGlobalProxy, 1, 1) \
- F(DefineAccessorPropertyUnchecked, 5, 1) \
- F(DefineDataPropertyInLiteral, 6, 1) \
- F(GetDataProperty, 2, 1) \
- F(GetConstructorName, 1, 1) \
- F(HasFastPackedElements, 1, 1) \
- F(ValueOf, 1, 1) \
- F(IsJSReceiver, 1, 1) \
- F(ClassOf, 1, 1) \
- F(CopyDataProperties, 2, 1) \
- F(DefineGetterPropertyUnchecked, 4, 1) \
- F(DefineSetterPropertyUnchecked, 4, 1) \
- F(ToObject, 1, 1) \
- F(ToPrimitive, 1, 1) \
- F(ToPrimitive_Number, 1, 1) \
- F(ToNumber, 1, 1) \
- F(ToInteger, 1, 1) \
- F(ToLength, 1, 1) \
- F(ToString, 1, 1) \
- F(ToName, 1, 1) \
- F(SameValue, 2, 1) \
- F(SameValueZero, 2, 1) \
- F(Compare, 3, 1) \
- F(HasInPrototypeChain, 2, 1) \
- F(CreateIterResultObject, 2, 1) \
- F(CreateKeyValueArray, 2, 1) \
- F(IsAccessCheckNeeded, 1, 1) \
+#define FOR_EACH_INTRINSIC_OBJECT(F) \
+ F(GetPrototype, 1, 1) \
+ F(ObjectHasOwnProperty, 2, 1) \
+ F(ObjectCreate, 2, 1) \
+ F(InternalSetPrototype, 2, 1) \
+ F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
+ F(GetProperty, 2, 1) \
+ F(KeyedGetProperty, 2, 1) \
+ F(AddNamedProperty, 4, 1) \
+ F(SetProperty, 4, 1) \
+ F(AddElement, 3, 1) \
+ F(AppendElement, 2, 1) \
+ F(DeleteProperty_Sloppy, 2, 1) \
+ F(DeleteProperty_Strict, 2, 1) \
+ F(HasProperty, 2, 1) \
+ F(GetOwnPropertyKeys, 2, 1) \
+ F(GetInterceptorInfo, 1, 1) \
+ F(ToFastProperties, 1, 1) \
+ F(AllocateHeapNumber, 0, 1) \
+ F(NewObject, 2, 1) \
+ F(FinalizeInstanceSize, 1, 1) \
+ F(LoadMutableDouble, 2, 1) \
+ F(TryMigrateInstance, 1, 1) \
+ F(IsJSGlobalProxy, 1, 1) \
+ F(DefineAccessorPropertyUnchecked, 5, 1) \
+ F(DefineDataPropertyInLiteral, 6, 1) \
+ F(GetDataProperty, 2, 1) \
+ F(GetConstructorName, 1, 1) \
+ F(HasFastPackedElements, 1, 1) \
+ F(ValueOf, 1, 1) \
+ F(IsJSReceiver, 1, 1) \
+ F(ClassOf, 1, 1) \
+ F(CopyDataProperties, 2, 1) \
+ F(CopyDataPropertiesWithExcludedProperties, -1 /* >= 1 */, 1) \
+ F(DefineGetterPropertyUnchecked, 4, 1) \
+ F(DefineSetterPropertyUnchecked, 4, 1) \
+ F(ToObject, 1, 1) \
+ F(ToPrimitive, 1, 1) \
+ F(ToPrimitive_Number, 1, 1) \
+ F(ToNumber, 1, 1) \
+ F(ToInteger, 1, 1) \
+ F(ToLength, 1, 1) \
+ F(ToString, 1, 1) \
+ F(ToName, 1, 1) \
+ F(SameValue, 2, 1) \
+ F(SameValueZero, 2, 1) \
+ F(Compare, 3, 1) \
+ F(HasInPrototypeChain, 2, 1) \
+ F(CreateIterResultObject, 2, 1) \
+ F(CreateKeyValueArray, 2, 1) \
+ F(IsAccessCheckNeeded, 1, 1) \
F(CreateDataProperty, 3, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F) \
@@ -459,6 +450,21 @@ namespace internal {
F(GreaterThanOrEqual, 2, 1) \
F(InstanceOf, 2, 1)
+#define FOR_EACH_INTRINSIC_PROMISE(F) \
+ F(EnqueueMicrotask, 1, 1) \
+ F(EnqueuePromiseReactionJob, 1, 1) \
+ F(EnqueuePromiseResolveThenableJob, 1, 1) \
+ F(PromiseHookInit, 2, 1) \
+ F(PromiseHookResolve, 1, 1) \
+ F(PromiseHookBefore, 1, 1) \
+ F(PromiseHookAfter, 1, 1) \
+ F(PromiseMarkAsHandled, 1, 1) \
+ F(PromiseRejectEventFromStack, 2, 1) \
+ F(PromiseRevokeReject, 1, 1) \
+ F(PromiseResult, 1, 1) \
+ F(PromiseStatus, 1, 1) \
+ F(ReportPromiseReject, 2, 1)
+
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(IsJSProxy, 1, 1) \
F(JSProxyCall, -1 /* >= 2 */, 1) \
@@ -507,317 +513,8 @@ namespace internal {
F(StoreLookupSlot_Sloppy, 2, 1) \
F(StoreLookupSlot_Strict, 2, 1)
-#define FOR_EACH_INTRINSIC_SIMD(F) \
- F(IsSimdValue, 1, 1) \
- F(CreateFloat32x4, 4, 1) \
- F(CreateInt32x4, 4, 1) \
- F(CreateUint32x4, 4, 1) \
- F(CreateBool32x4, 4, 1) \
- F(CreateInt16x8, 8, 1) \
- F(CreateUint16x8, 8, 1) \
- F(CreateBool16x8, 8, 1) \
- F(CreateInt8x16, 16, 1) \
- F(CreateUint8x16, 16, 1) \
- F(CreateBool8x16, 16, 1) \
- F(Float32x4Check, 1, 1) \
- F(Float32x4ExtractLane, 2, 1) \
- F(Float32x4ReplaceLane, 3, 1) \
- F(Float32x4Abs, 1, 1) \
- F(Float32x4Neg, 1, 1) \
- F(Float32x4Sqrt, 1, 1) \
- F(Float32x4RecipApprox, 1, 1) \
- F(Float32x4RecipSqrtApprox, 1, 1) \
- F(Float32x4Add, 2, 1) \
- F(Float32x4Sub, 2, 1) \
- F(Float32x4Mul, 2, 1) \
- F(Float32x4Div, 2, 1) \
- F(Float32x4Min, 2, 1) \
- F(Float32x4Max, 2, 1) \
- F(Float32x4MinNum, 2, 1) \
- F(Float32x4MaxNum, 2, 1) \
- F(Float32x4Equal, 2, 1) \
- F(Float32x4NotEqual, 2, 1) \
- F(Float32x4LessThan, 2, 1) \
- F(Float32x4LessThanOrEqual, 2, 1) \
- F(Float32x4GreaterThan, 2, 1) \
- F(Float32x4GreaterThanOrEqual, 2, 1) \
- F(Float32x4Select, 3, 1) \
- F(Float32x4Swizzle, 5, 1) \
- F(Float32x4Shuffle, 6, 1) \
- F(Float32x4FromInt32x4, 1, 1) \
- F(Float32x4FromUint32x4, 1, 1) \
- F(Float32x4FromInt32x4Bits, 1, 1) \
- F(Float32x4FromUint32x4Bits, 1, 1) \
- F(Float32x4FromInt16x8Bits, 1, 1) \
- F(Float32x4FromUint16x8Bits, 1, 1) \
- F(Float32x4FromInt8x16Bits, 1, 1) \
- F(Float32x4FromUint8x16Bits, 1, 1) \
- F(Float32x4Load, 2, 1) \
- F(Float32x4Load1, 2, 1) \
- F(Float32x4Load2, 2, 1) \
- F(Float32x4Load3, 2, 1) \
- F(Float32x4Store, 3, 1) \
- F(Float32x4Store1, 3, 1) \
- F(Float32x4Store2, 3, 1) \
- F(Float32x4Store3, 3, 1) \
- F(Int32x4Check, 1, 1) \
- F(Int32x4ExtractLane, 2, 1) \
- F(Int32x4ReplaceLane, 3, 1) \
- F(Int32x4Neg, 1, 1) \
- F(Int32x4Add, 2, 1) \
- F(Int32x4Sub, 2, 1) \
- F(Int32x4Mul, 2, 1) \
- F(Int32x4Min, 2, 1) \
- F(Int32x4Max, 2, 1) \
- F(Int32x4And, 2, 1) \
- F(Int32x4Or, 2, 1) \
- F(Int32x4Xor, 2, 1) \
- F(Int32x4Not, 1, 1) \
- F(Int32x4ShiftLeftByScalar, 2, 1) \
- F(Int32x4ShiftRightByScalar, 2, 1) \
- F(Int32x4Equal, 2, 1) \
- F(Int32x4NotEqual, 2, 1) \
- F(Int32x4LessThan, 2, 1) \
- F(Int32x4LessThanOrEqual, 2, 1) \
- F(Int32x4GreaterThan, 2, 1) \
- F(Int32x4GreaterThanOrEqual, 2, 1) \
- F(Int32x4Select, 3, 1) \
- F(Int32x4Swizzle, 5, 1) \
- F(Int32x4Shuffle, 6, 1) \
- F(Int32x4FromFloat32x4, 1, 1) \
- F(Int32x4FromUint32x4, 1, 1) \
- F(Int32x4FromFloat32x4Bits, 1, 1) \
- F(Int32x4FromUint32x4Bits, 1, 1) \
- F(Int32x4FromInt16x8Bits, 1, 1) \
- F(Int32x4FromUint16x8Bits, 1, 1) \
- F(Int32x4FromInt8x16Bits, 1, 1) \
- F(Int32x4FromUint8x16Bits, 1, 1) \
- F(Int32x4Load, 2, 1) \
- F(Int32x4Load1, 2, 1) \
- F(Int32x4Load2, 2, 1) \
- F(Int32x4Load3, 2, 1) \
- F(Int32x4Store, 3, 1) \
- F(Int32x4Store1, 3, 1) \
- F(Int32x4Store2, 3, 1) \
- F(Int32x4Store3, 3, 1) \
- F(Uint32x4Check, 1, 1) \
- F(Uint32x4ExtractLane, 2, 1) \
- F(Uint32x4ReplaceLane, 3, 1) \
- F(Uint32x4Add, 2, 1) \
- F(Uint32x4Sub, 2, 1) \
- F(Uint32x4Mul, 2, 1) \
- F(Uint32x4Min, 2, 1) \
- F(Uint32x4Max, 2, 1) \
- F(Uint32x4And, 2, 1) \
- F(Uint32x4Or, 2, 1) \
- F(Uint32x4Xor, 2, 1) \
- F(Uint32x4Not, 1, 1) \
- F(Uint32x4ShiftLeftByScalar, 2, 1) \
- F(Uint32x4ShiftRightByScalar, 2, 1) \
- F(Uint32x4Equal, 2, 1) \
- F(Uint32x4NotEqual, 2, 1) \
- F(Uint32x4LessThan, 2, 1) \
- F(Uint32x4LessThanOrEqual, 2, 1) \
- F(Uint32x4GreaterThan, 2, 1) \
- F(Uint32x4GreaterThanOrEqual, 2, 1) \
- F(Uint32x4Select, 3, 1) \
- F(Uint32x4Swizzle, 5, 1) \
- F(Uint32x4Shuffle, 6, 1) \
- F(Uint32x4FromFloat32x4, 1, 1) \
- F(Uint32x4FromInt32x4, 1, 1) \
- F(Uint32x4FromFloat32x4Bits, 1, 1) \
- F(Uint32x4FromInt32x4Bits, 1, 1) \
- F(Uint32x4FromInt16x8Bits, 1, 1) \
- F(Uint32x4FromUint16x8Bits, 1, 1) \
- F(Uint32x4FromInt8x16Bits, 1, 1) \
- F(Uint32x4FromUint8x16Bits, 1, 1) \
- F(Uint32x4Load, 2, 1) \
- F(Uint32x4Load1, 2, 1) \
- F(Uint32x4Load2, 2, 1) \
- F(Uint32x4Load3, 2, 1) \
- F(Uint32x4Store, 3, 1) \
- F(Uint32x4Store1, 3, 1) \
- F(Uint32x4Store2, 3, 1) \
- F(Uint32x4Store3, 3, 1) \
- F(Bool32x4Check, 1, 1) \
- F(Bool32x4ExtractLane, 2, 1) \
- F(Bool32x4ReplaceLane, 3, 1) \
- F(Bool32x4And, 2, 1) \
- F(Bool32x4Or, 2, 1) \
- F(Bool32x4Xor, 2, 1) \
- F(Bool32x4Not, 1, 1) \
- F(Bool32x4AnyTrue, 1, 1) \
- F(Bool32x4AllTrue, 1, 1) \
- F(Bool32x4Swizzle, 5, 1) \
- F(Bool32x4Shuffle, 6, 1) \
- F(Bool32x4Equal, 2, 1) \
- F(Bool32x4NotEqual, 2, 1) \
- F(Int16x8Check, 1, 1) \
- F(Int16x8ExtractLane, 2, 1) \
- F(Int16x8ReplaceLane, 3, 1) \
- F(Int16x8Neg, 1, 1) \
- F(Int16x8Add, 2, 1) \
- F(Int16x8AddSaturate, 2, 1) \
- F(Int16x8Sub, 2, 1) \
- F(Int16x8SubSaturate, 2, 1) \
- F(Int16x8Mul, 2, 1) \
- F(Int16x8Min, 2, 1) \
- F(Int16x8Max, 2, 1) \
- F(Int16x8And, 2, 1) \
- F(Int16x8Or, 2, 1) \
- F(Int16x8Xor, 2, 1) \
- F(Int16x8Not, 1, 1) \
- F(Int16x8ShiftLeftByScalar, 2, 1) \
- F(Int16x8ShiftRightByScalar, 2, 1) \
- F(Int16x8Equal, 2, 1) \
- F(Int16x8NotEqual, 2, 1) \
- F(Int16x8LessThan, 2, 1) \
- F(Int16x8LessThanOrEqual, 2, 1) \
- F(Int16x8GreaterThan, 2, 1) \
- F(Int16x8GreaterThanOrEqual, 2, 1) \
- F(Int16x8Select, 3, 1) \
- F(Int16x8Swizzle, 9, 1) \
- F(Int16x8Shuffle, 10, 1) \
- F(Int16x8FromUint16x8, 1, 1) \
- F(Int16x8FromFloat32x4Bits, 1, 1) \
- F(Int16x8FromInt32x4Bits, 1, 1) \
- F(Int16x8FromUint32x4Bits, 1, 1) \
- F(Int16x8FromUint16x8Bits, 1, 1) \
- F(Int16x8FromInt8x16Bits, 1, 1) \
- F(Int16x8FromUint8x16Bits, 1, 1) \
- F(Int16x8Load, 2, 1) \
- F(Int16x8Store, 3, 1) \
- F(Uint16x8Check, 1, 1) \
- F(Uint16x8ExtractLane, 2, 1) \
- F(Uint16x8ReplaceLane, 3, 1) \
- F(Uint16x8Add, 2, 1) \
- F(Uint16x8AddSaturate, 2, 1) \
- F(Uint16x8Sub, 2, 1) \
- F(Uint16x8SubSaturate, 2, 1) \
- F(Uint16x8Mul, 2, 1) \
- F(Uint16x8Min, 2, 1) \
- F(Uint16x8Max, 2, 1) \
- F(Uint16x8And, 2, 1) \
- F(Uint16x8Or, 2, 1) \
- F(Uint16x8Xor, 2, 1) \
- F(Uint16x8Not, 1, 1) \
- F(Uint16x8ShiftLeftByScalar, 2, 1) \
- F(Uint16x8ShiftRightByScalar, 2, 1) \
- F(Uint16x8Equal, 2, 1) \
- F(Uint16x8NotEqual, 2, 1) \
- F(Uint16x8LessThan, 2, 1) \
- F(Uint16x8LessThanOrEqual, 2, 1) \
- F(Uint16x8GreaterThan, 2, 1) \
- F(Uint16x8GreaterThanOrEqual, 2, 1) \
- F(Uint16x8Select, 3, 1) \
- F(Uint16x8Swizzle, 9, 1) \
- F(Uint16x8Shuffle, 10, 1) \
- F(Uint16x8FromInt16x8, 1, 1) \
- F(Uint16x8FromFloat32x4Bits, 1, 1) \
- F(Uint16x8FromInt32x4Bits, 1, 1) \
- F(Uint16x8FromUint32x4Bits, 1, 1) \
- F(Uint16x8FromInt16x8Bits, 1, 1) \
- F(Uint16x8FromInt8x16Bits, 1, 1) \
- F(Uint16x8FromUint8x16Bits, 1, 1) \
- F(Uint16x8Load, 2, 1) \
- F(Uint16x8Store, 3, 1) \
- F(Bool16x8Check, 1, 1) \
- F(Bool16x8ExtractLane, 2, 1) \
- F(Bool16x8ReplaceLane, 3, 1) \
- F(Bool16x8And, 2, 1) \
- F(Bool16x8Or, 2, 1) \
- F(Bool16x8Xor, 2, 1) \
- F(Bool16x8Not, 1, 1) \
- F(Bool16x8AnyTrue, 1, 1) \
- F(Bool16x8AllTrue, 1, 1) \
- F(Bool16x8Swizzle, 9, 1) \
- F(Bool16x8Shuffle, 10, 1) \
- F(Bool16x8Equal, 2, 1) \
- F(Bool16x8NotEqual, 2, 1) \
- F(Int8x16Check, 1, 1) \
- F(Int8x16ExtractLane, 2, 1) \
- F(Int8x16ReplaceLane, 3, 1) \
- F(Int8x16Neg, 1, 1) \
- F(Int8x16Add, 2, 1) \
- F(Int8x16AddSaturate, 2, 1) \
- F(Int8x16Sub, 2, 1) \
- F(Int8x16SubSaturate, 2, 1) \
- F(Int8x16Mul, 2, 1) \
- F(Int8x16Min, 2, 1) \
- F(Int8x16Max, 2, 1) \
- F(Int8x16And, 2, 1) \
- F(Int8x16Or, 2, 1) \
- F(Int8x16Xor, 2, 1) \
- F(Int8x16Not, 1, 1) \
- F(Int8x16ShiftLeftByScalar, 2, 1) \
- F(Int8x16ShiftRightByScalar, 2, 1) \
- F(Int8x16Equal, 2, 1) \
- F(Int8x16NotEqual, 2, 1) \
- F(Int8x16LessThan, 2, 1) \
- F(Int8x16LessThanOrEqual, 2, 1) \
- F(Int8x16GreaterThan, 2, 1) \
- F(Int8x16GreaterThanOrEqual, 2, 1) \
- F(Int8x16Select, 3, 1) \
- F(Int8x16Swizzle, 17, 1) \
- F(Int8x16Shuffle, 18, 1) \
- F(Int8x16FromUint8x16, 1, 1) \
- F(Int8x16FromFloat32x4Bits, 1, 1) \
- F(Int8x16FromInt32x4Bits, 1, 1) \
- F(Int8x16FromUint32x4Bits, 1, 1) \
- F(Int8x16FromInt16x8Bits, 1, 1) \
- F(Int8x16FromUint16x8Bits, 1, 1) \
- F(Int8x16FromUint8x16Bits, 1, 1) \
- F(Int8x16Load, 2, 1) \
- F(Int8x16Store, 3, 1) \
- F(Uint8x16Check, 1, 1) \
- F(Uint8x16ExtractLane, 2, 1) \
- F(Uint8x16ReplaceLane, 3, 1) \
- F(Uint8x16Add, 2, 1) \
- F(Uint8x16AddSaturate, 2, 1) \
- F(Uint8x16Sub, 2, 1) \
- F(Uint8x16SubSaturate, 2, 1) \
- F(Uint8x16Mul, 2, 1) \
- F(Uint8x16Min, 2, 1) \
- F(Uint8x16Max, 2, 1) \
- F(Uint8x16And, 2, 1) \
- F(Uint8x16Or, 2, 1) \
- F(Uint8x16Xor, 2, 1) \
- F(Uint8x16Not, 1, 1) \
- F(Uint8x16ShiftLeftByScalar, 2, 1) \
- F(Uint8x16ShiftRightByScalar, 2, 1) \
- F(Uint8x16Equal, 2, 1) \
- F(Uint8x16NotEqual, 2, 1) \
- F(Uint8x16LessThan, 2, 1) \
- F(Uint8x16LessThanOrEqual, 2, 1) \
- F(Uint8x16GreaterThan, 2, 1) \
- F(Uint8x16GreaterThanOrEqual, 2, 1) \
- F(Uint8x16Select, 3, 1) \
- F(Uint8x16Swizzle, 17, 1) \
- F(Uint8x16Shuffle, 18, 1) \
- F(Uint8x16FromInt8x16, 1, 1) \
- F(Uint8x16FromFloat32x4Bits, 1, 1) \
- F(Uint8x16FromInt32x4Bits, 1, 1) \
- F(Uint8x16FromUint32x4Bits, 1, 1) \
- F(Uint8x16FromInt16x8Bits, 1, 1) \
- F(Uint8x16FromUint16x8Bits, 1, 1) \
- F(Uint8x16FromInt8x16Bits, 1, 1) \
- F(Uint8x16Load, 2, 1) \
- F(Uint8x16Store, 3, 1) \
- F(Bool8x16Check, 1, 1) \
- F(Bool8x16ExtractLane, 2, 1) \
- F(Bool8x16ReplaceLane, 3, 1) \
- F(Bool8x16And, 2, 1) \
- F(Bool8x16Or, 2, 1) \
- F(Bool8x16Xor, 2, 1) \
- F(Bool8x16Not, 1, 1) \
- F(Bool8x16AnyTrue, 1, 1) \
- F(Bool8x16AllTrue, 1, 1) \
- F(Bool8x16Swizzle, 17, 1) \
- F(Bool8x16Shuffle, 18, 1) \
- F(Bool8x16Equal, 2, 1) \
- F(Bool8x16NotEqual, 2, 1)
-
#define FOR_EACH_INTRINSIC_STRINGS(F) \
+ F(GetSubstitution, 4, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringIndexOf, 3, 1) \
F(StringIndexOfUnchecked, 3, 1) \
@@ -831,8 +528,6 @@ namespace internal {
F(StringBuilderJoin, 3, 1) \
F(SparseJoinWithSeparator, 3, 1) \
F(StringToArray, 2, 1) \
- F(StringToLowerCase, 1, 1) \
- F(StringToUpperCase, 1, 1) \
F(StringLessThan, 2, 1) \
F(StringLessThanOrEqual, 2, 1) \
F(StringGreaterThan, 2, 1) \
@@ -867,7 +562,7 @@ namespace internal {
F(GetOptimizationCount, 1, 1) \
F(GetUndetectable, 0, 1) \
F(GetCallable, 0, 1) \
- F(ClearFunctionTypeFeedback, 1, 1) \
+ F(ClearFunctionFeedback, 1, 1) \
F(CheckWasmWrapperElision, 2, 1) \
F(NotifyContextDisposed, 0, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
@@ -929,28 +624,23 @@ namespace internal {
F(TypedArrayGetLength, 1, 1) \
F(TypedArrayGetBuffer, 1, 1) \
F(TypedArraySetFastCases, 3, 1) \
+ F(TypedArraySortFast, 1, 1) \
F(TypedArrayMaxSizeInHeap, 0, 1) \
F(IsTypedArray, 1, 1) \
F(IsSharedTypedArray, 1, 1) \
F(IsSharedIntegerTypedArray, 1, 1) \
F(IsSharedInteger32TypedArray, 1, 1)
-#define FOR_EACH_INTRINSIC_WASM(F) \
- F(WasmGrowMemory, 1, 1) \
- F(WasmMemorySize, 0, 1) \
- F(ThrowWasmError, 2, 1) \
- F(WasmThrowTypeError, 0, 1) \
- F(WasmThrow, 2, 1) \
- F(WasmGetCaughtExceptionValue, 1, 1) \
- F(ThrowWasmTrapUnreachable, 0, 1) \
- F(ThrowWasmTrapMemOutOfBounds, 0, 1) \
- F(ThrowWasmTrapDivByZero, 0, 1) \
- F(ThrowWasmTrapDivUnrepresentable, 0, 1) \
- F(ThrowWasmTrapRemByZero, 0, 1) \
- F(ThrowWasmTrapFloatUnrepresentable, 0, 1) \
- F(ThrowWasmTrapFuncInvalid, 0, 1) \
- F(ThrowWasmTrapFuncSigMismatch, 0, 1) \
- F(WasmRunInterpreter, 3, 1)
+#define FOR_EACH_INTRINSIC_WASM(F) \
+ F(WasmGrowMemory, 1, 1) \
+ F(WasmMemorySize, 0, 1) \
+ F(ThrowWasmError, 2, 1) \
+ F(ThrowWasmErrorFromTrapIf, 1, 1) \
+ F(WasmThrowTypeError, 0, 1) \
+ F(WasmThrow, 2, 1) \
+ F(WasmGetCaughtExceptionValue, 1, 1) \
+ F(WasmRunInterpreter, 3, 1) \
+ F(WasmStackGuard, 0, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
F(LoadLookupSlotForCall, 1, 2)
@@ -963,22 +653,20 @@ namespace internal {
#define FOR_EACH_INTRINSIC_IC(F) \
F(BinaryOpIC_Miss, 2, 1) \
F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
- F(CallIC_Miss, 3, 1) \
F(CompareIC_Miss, 3, 1) \
F(ElementsTransitionAndStoreIC_Miss, 6, 1) \
F(KeyedLoadIC_Miss, 4, 1) \
- F(KeyedLoadIC_MissFromStubFailure, 4, 1) \
F(KeyedStoreIC_Miss, 5, 1) \
F(KeyedStoreIC_Slow, 5, 1) \
F(LoadElementWithInterceptor, 2, 1) \
F(LoadGlobalIC_Miss, 3, 1) \
- F(LoadGlobalIC_Slow, 1, 1) \
+ F(LoadGlobalIC_Slow, 3, 1) \
F(LoadIC_Miss, 4, 1) \
- F(LoadPropertyWithInterceptor, 3, 1) \
+ F(LoadPropertyWithInterceptor, 5, 1) \
F(LoadPropertyWithInterceptorOnly, 3, 1) \
F(StoreCallbackProperty, 6, 1) \
F(StoreIC_Miss, 5, 1) \
- F(StorePropertyWithInterceptor, 3, 1) \
+ F(StorePropertyWithInterceptor, 5, 1) \
F(ToBooleanIC_Miss, 1, 1) \
F(Unreachable, 0, 1)
@@ -1005,10 +693,10 @@ namespace internal {
FOR_EACH_INTRINSIC_NUMBERS(F) \
FOR_EACH_INTRINSIC_OBJECT(F) \
FOR_EACH_INTRINSIC_OPERATORS(F) \
+ FOR_EACH_INTRINSIC_PROMISE(F) \
FOR_EACH_INTRINSIC_PROXY(F) \
FOR_EACH_INTRINSIC_REGEXP(F) \
FOR_EACH_INTRINSIC_SCOPES(F) \
- FOR_EACH_INTRINSIC_SIMD(F) \
FOR_EACH_INTRINSIC_STRINGS(F) \
FOR_EACH_INTRINSIC_SYMBOL(F) \
FOR_EACH_INTRINSIC_TEST(F) \
@@ -1157,6 +845,18 @@ class DeclareGlobalsNativeFlag : public BitField<bool, 1, 1> {};
STATIC_ASSERT(LANGUAGE_END == 2);
class DeclareGlobalsLanguageMode : public BitField<LanguageMode, 2, 1> {};
+// A set of bits returned by Runtime_GetOptimizationStatus.
+// These bits must be in sync with bits defined in test/mjsunit/mjsunit.js
+enum class OptimizationStatus {
+ kIsFunction = 1 << 0,
+ kNeverOptimize = 1 << 1,
+ kAlwaysOptimize = 1 << 2,
+ kMaybeDeopted = 1 << 3,
+ kOptimized = 1 << 4,
+ kTurboFanned = 1 << 5,
+ kInterpreted = 1 << 6,
+};
+
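Because the status is now a bit set rather than a single sentinel, callers test individual bits. A hypothetical decoding helper, mirroring the JS-side bit tests in test/mjsunit/mjsunit.js (HasStatus is not part of the patch):

inline bool HasStatus(int status, OptimizationStatus bit) {
  return (status & static_cast<int>(bit)) != 0;
}

// For example, an optimized TurboFan function reports both bits:
//   HasStatus(status, OptimizationStatus::kOptimized) &&
//   HasStatus(status, OptimizationStatus::kTurboFanned)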
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index 189b89c258..eee6d6c7ad 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -41,6 +41,7 @@
#include "src/assembler.h"
#include "src/debug/debug.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -118,6 +119,18 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+Address Assembler::target_address_at(Address pc, Code* code) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
+
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 448acb34ad..19510b20aa 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -541,7 +541,7 @@ void Assembler::load_label_offset(Register r1, Label* L) {
void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) {
int offset_in_halfwords = branch_offset / 2;
if (is_bound && is_int16(offset_in_halfwords)) {
- brc(c, Operand(offset_in_halfwords & 0xFFFF)); // short jump
+ brc(c, Operand(offset_in_halfwords)); // short jump
} else {
brcl(c, Operand(offset_in_halfwords)); // long jump
}
@@ -597,34 +597,7 @@ void Assembler::nop(int type) {
}
}
-// RX format: <insn> R1,D2(X2,B2)
-// +--------+----+----+----+-------------+
-// | OpCode | R1 | X2 | B2 | D2 |
-// +--------+----+----+----+-------------+
-// 0 8 12 16 20 31
-#define RX_FORM_EMIT(name, op) \
- void Assembler::name(Register r, const MemOperand& opnd) { \
- name(r, opnd.getIndexRegister(), opnd.getBaseRegister(), \
- opnd.getDisplacement()); \
- } \
- void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
- rx_form(op, r1, x2, b2, d2); \
- }
-void Assembler::rx_form(Opcode op, Register r1, Register x2, Register b2,
- Disp d2) {
- DCHECK(is_uint8(op));
- DCHECK(is_uint12(d2));
- emit4bytes(op * B24 | r1.code() * B20 | x2.code() * B16 | b2.code() * B12 |
- d2);
-}
-void Assembler::rx_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
- Disp d2) {
- DCHECK(is_uint8(op));
- DCHECK(is_uint12(d2));
- emit4bytes(op * B24 | r1.code() * B20 | x2.code() * B16 | b2.code() * B12 |
- d2);
-}
// RI1 format: <insn> R1,I2
// +--------+----+----+------------------+
@@ -652,7 +625,7 @@ void Assembler::ri_form(Opcode op, Register r1, const Operand& i2) {
void Assembler::ri_form(Opcode op, Condition m1, const Operand& i2) {
DCHECK(is_uint12(op));
DCHECK(is_uint4(m1));
- DCHECK(is_uint16(i2.imm_));
+ DCHECK(op == BRC ? is_int16(i2.imm_) : is_uint16(i2.imm_));
emit4bytes((op & 0xFF0) * B20 | m1 * B20 | (op & 0xF) * B16 |
(i2.imm_ & 0xFFFF));
}
@@ -858,62 +831,6 @@ void Assembler::rxe_form(Opcode op, Register r1, Register x2, Register b2,
emit6bytes(code);
}
-// RXY format: <insn> R1,D2(X2,B2)
-// +--------+----+----+----+-------------+--------+--------+
-// | OpCode | R1 | X2 | B2 | DL2 | DH2 | OpCode |
-// +--------+----+----+----+-------------+--------+--------+
-// 0 8 12 16 20 32 36 40 47
-#define RXY_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
- rxy_form(op, r1, x2, b2, d2); \
- } \
- void Assembler::name(Register r1, const MemOperand& opnd) { \
- name(r1, opnd.getIndexRegister(), opnd.getBaseRegister(), \
- opnd.getDisplacement()); \
- }
-
-void Assembler::rxy_form(Opcode op, Register r1, Register x2, Register b2,
- Disp d2) {
- DCHECK(is_int20(d2));
- DCHECK(is_uint16(op));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(r1.code())) * B36 |
- (static_cast<uint64_t>(x2.code())) * B32 |
- (static_cast<uint64_t>(b2.code())) * B28 |
- (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
- (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
-void Assembler::rxy_form(Opcode op, Register r1, Condition m3, Register b2,
- Disp d2) {
- DCHECK(is_int20(d2));
- DCHECK(is_uint16(op));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(r1.code())) * B36 |
- (static_cast<uint64_t>(m3 & 0xF)) * B32 |
- (static_cast<uint64_t>(b2.code())) * B28 |
- (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
- (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
-void Assembler::rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
- Disp d2) {
- DCHECK(is_int20(d2));
- DCHECK(is_uint16(op));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(r1.code())) * B36 |
- (static_cast<uint64_t>(x2.code())) * B32 |
- (static_cast<uint64_t>(b2.code())) * B28 |
- (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
- (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
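The removed rxy_form encodings split a signed 20-bit displacement into DL2 (low 12 bits) and DH2 (high 8 bits) within the 48-bit instruction. A self-contained re-derivation of that packing (EncodeRXY is a made-up name):

#include <cstdint>

uint64_t EncodeRXY(uint16_t op, uint8_t r1, uint8_t x2, uint8_t b2,
                   int32_t d2) {
  return (static_cast<uint64_t>(op & 0xFF00)) << 32 |  // high opcode byte
         (static_cast<uint64_t>(r1 & 0xF)) << 36 |
         (static_cast<uint64_t>(x2 & 0xF)) << 32 |
         (static_cast<uint64_t>(b2 & 0xF)) << 28 |
         (static_cast<uint64_t>(d2 & 0x0FFF)) << 16 |  // DL2: low 12 bits
         (static_cast<uint64_t>(d2 & 0xFF000)) >> 4 |  // DH2: high 8 bits
         (static_cast<uint64_t>(op & 0x00FF));         // low opcode byte
}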
// RRS format: <insn> R1,R2,M3,D4(B4)
// +--------+----+----+----+-------------+----+---+--------+
// | OpCode | R1 | R2 | B4 | D4 | M3 |///| OpCode |
@@ -1333,26 +1250,13 @@ void Assembler::rrfe_form(Opcode op, Condition m3, Condition m4, Register r1,
// end of S390 Instruction generation
// start of S390 instruction
-RX_FORM_EMIT(bc, BC)
-RXE_FORM_EMIT(ceb, CEB)
SS1_FORM_EMIT(ed, ED)
-RX_FORM_EMIT(ex, EX)
-RX_FORM_EMIT(le_z, LE)
-RXY_FORM_EMIT(ley, LEY)
-RXY_FORM_EMIT(lrv, LRV)
-RXY_FORM_EMIT(lrvg, LRVG)
-RXY_FORM_EMIT(lrvh, LRVH)
SS1_FORM_EMIT(mvn, MVN)
SS1_FORM_EMIT(nc, NC)
SI_FORM_EMIT(ni, NI)
RI1_FORM_EMIT(nilh, NILH)
RI1_FORM_EMIT(nill, NILL)
RI1_FORM_EMIT(oill, OILL)
-RXY_FORM_EMIT(pfd, PFD)
-RXY_FORM_EMIT(slgf, SLGF)
-RXY_FORM_EMIT(strvh, STRVH)
-RXY_FORM_EMIT(strv, STRV)
-RXY_FORM_EMIT(strvg, STRVG)
RI1_FORM_EMIT(tmll, TMLL)
SS1_FORM_EMIT(tr, TR)
S_FORM_EMIT(ts, TS)
@@ -1360,16 +1264,6 @@ S_FORM_EMIT(ts, TS)
// -------------------------
// Load Address Instructions
// -------------------------
-// Load Address Register-Storage
-void Assembler::la(Register r1, const MemOperand& opnd) {
- rx_form(LA, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Address Register-Storage
-void Assembler::lay(Register r1, const MemOperand& opnd) {
- rxy_form(LAY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Load Address Relative Long
void Assembler::larl(Register r1, Label* l) {
larl(r1, Operand(branch_offset(l)));
@@ -1378,98 +1272,15 @@ void Assembler::larl(Register r1, Label* l) {
// -----------------
// Load Instructions
// -----------------
-// Load Byte Register-Storage (32<-8)
-void Assembler::lb(Register r, const MemOperand& src) {
- rxy_form(LB, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Byte Register-Storage (64<-8)
-void Assembler::lgb(Register r, const MemOperand& src) {
- rxy_form(LGB, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Halfword Register-Storage (32<-16)
-void Assembler::lh(Register r, const MemOperand& src) {
- rx_form(LH, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Halfword Register-Storage (32<-16)
-void Assembler::lhy(Register r, const MemOperand& src) {
- rxy_form(LHY, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Halfword Register-Storage (64<-16)
-void Assembler::lgh(Register r, const MemOperand& src) {
- rxy_form(LGH, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Register-Storage (32)
-void Assembler::l(Register r, const MemOperand& src) {
- rx_form(L, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Register-Storage (32)
-void Assembler::ly(Register r, const MemOperand& src) {
- rxy_form(LY, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Register-Storage (64)
-void Assembler::lg(Register r, const MemOperand& src) {
- rxy_form(LG, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Register-Storage (64<-32)
-void Assembler::lgf(Register r, const MemOperand& src) {
- rxy_form(LGF, r, src.rx(), src.rb(), src.offset());
-}
-
// Load Halfword Immediate (32)
void Assembler::lhi(Register r, const Operand& imm) { ri_form(LHI, r, imm); }
// Load Halfword Immediate (64)
void Assembler::lghi(Register r, const Operand& imm) { ri_form(LGHI, r, imm); }
-// --------------------------
-// Load And Test Instructions
-// --------------------------
-// Load and Test Register-Storage (32)
-void Assembler::lt_z(Register r1, const MemOperand& opnd) {
- rxy_form(LT, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load and Test Register-Storage (64)
-void Assembler::ltg(Register r1, const MemOperand& opnd) {
- rxy_form(LTG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// -------------------------
// Load Logical Instructions
// -------------------------
-// Load Logical Character (32) - loads a byte and zero ext.
-void Assembler::llc(Register r1, const MemOperand& opnd) {
- rxy_form(LLC, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Logical Character (64) - loads a byte and zero ext.
-void Assembler::llgc(Register r1, const MemOperand& opnd) {
- rxy_form(LLGC, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Logical Register-Storage (64<-32)
-void Assembler::llgf(Register r1, const MemOperand& opnd) {
- rxy_form(LLGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Logical halfword Register-Storage (32<-16)
-void Assembler::llh(Register r1, const MemOperand& opnd) {
- rxy_form(LLH, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Logical halfword Register-Storage (64<-16)
-void Assembler::llgh(Register r1, const MemOperand& opnd) {
- rxy_form(LLGH, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Load On Condition R-R (32)
void Assembler::locr(Condition m3, Register r1, Register r2) {
rrf2_form(LOCR << 16 | m3 * B12 | r1.code() * B4 | r2.code());
@@ -1482,28 +1293,18 @@ void Assembler::locgr(Condition m3, Register r1, Register r2) {
// Load On Condition R-M (32)
void Assembler::loc(Condition m3, Register r1, const MemOperand& src) {
- rxy_form(LOC, r1, m3, src.rb(), src.offset());
+ rsy_form(LOC, r1, m3, src.rb(), src.offset());
}
// Load On Condition R-M (64)
void Assembler::locg(Condition m3, Register r1, const MemOperand& src) {
- rxy_form(LOCG, r1, m3, src.rb(), src.offset());
+ rsy_form(LOCG, r1, m3, src.rb(), src.offset());
}
// -------------------
// Branch Instructions
// -------------------
-
-// Branch on Count (32)
-void Assembler::bct(Register r, const MemOperand& opnd) {
- rx_form(BCT, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Branch on Count (64)
-void Assembler::bctg(Register r, const MemOperand& opnd) {
- rxy_form(BCTG, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Branch Relative and Save (32)
void Assembler::bras(Register r, const Operand& opnd) {
ri_form(BRAS, r, opnd);
@@ -1533,31 +1334,6 @@ void Assembler::brctg(Register r1, const Operand& imm) {
// --------------------
// Compare Instructions
// --------------------
-// Compare Register-Storage (32)
-void Assembler::c(Register r, const MemOperand& opnd) {
- rx_form(C, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Register-Storage (32)
-void Assembler::cy(Register r, const MemOperand& opnd) {
- rxy_form(CY, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Register-Storage (64)
-void Assembler::cg(Register r, const MemOperand& opnd) {
- rxy_form(CG, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Halfword Register-Storage (32)
-void Assembler::ch(Register r, const MemOperand& opnd) {
- rx_form(CH, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Halfword Register-Storage (32)
-void Assembler::chy(Register r, const MemOperand& opnd) {
- rxy_form(CHY, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Compare Halfword Immediate (32)
void Assembler::chi(Register r, const Operand& opnd) { ri_form(CHI, r, opnd); }
@@ -1569,21 +1345,6 @@ void Assembler::cghi(Register r, const Operand& opnd) {
// ----------------------------
// Compare Logical Instructions
// ----------------------------
-// Compare Logical Register-Storage (32)
-void Assembler::cl(Register r, const MemOperand& opnd) {
- rx_form(CL, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Logical Register-Storage (32)
-void Assembler::cly(Register r, const MemOperand& opnd) {
- rxy_form(CLY, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Logical Register-Storage (64)
-void Assembler::clg(Register r, const MemOperand& opnd) {
- rxy_form(CLG, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Compare Immediate (Mem - Imm) (8)
void Assembler::cli(const MemOperand& opnd, const Operand& imm) {
si_form(CLI, imm, opnd.rb(), opnd.offset());
@@ -1654,26 +1415,6 @@ void Assembler::mvc(const MemOperand& opnd1, const MemOperand& opnd2,
// -----------------------
// 32-bit Add Instructions
// -----------------------
-// Add Register-Storage (32)
-void Assembler::a(Register r1, const MemOperand& opnd) {
- rx_form(A, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Register-Storage (32)
-void Assembler::ay(Register r1, const MemOperand& opnd) {
- rxy_form(AY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Halfword Register-Storage (32)
-void Assembler::ah(Register r1, const MemOperand& opnd) {
- rx_form(AH, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Halfword Register-Storage (32)
-void Assembler::ahy(Register r1, const MemOperand& opnd) {
- rxy_form(AHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Add Halfword Immediate (32)
void Assembler::ahi(Register r1, const Operand& i2) { ri_form(AHI, r1, i2); }
@@ -1697,16 +1438,6 @@ void Assembler::asi(const MemOperand& opnd, const Operand& imm) {
// -----------------------
// 64-bit Add Instructions
// -----------------------
-// Add Register-Storage (64)
-void Assembler::ag(Register r1, const MemOperand& opnd) {
- rxy_form(AG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Register-Storage (64<-32)
-void Assembler::agf(Register r1, const MemOperand& opnd) {
- rxy_form(AGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Add Halfword Immediate (64)
void Assembler::aghi(Register r1, const Operand& i2) { ri_form(AGHI, r1, i2); }
@@ -1730,16 +1461,6 @@ void Assembler::agsi(const MemOperand& opnd, const Operand& imm) {
// -------------------------------
// 32-bit Add Logical Instructions
// -------------------------------
-// Add Logical Register-Storage (32)
-void Assembler::al_z(Register r1, const MemOperand& opnd) {
- rx_form(AL, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Logical Register-Storage (32)
-void Assembler::aly(Register r1, const MemOperand& opnd) {
- rxy_form(ALY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Add Logical Register-Register-Register (32)
void Assembler::alrk(Register r1, Register r2, Register r3) {
rrf1_form(ALRK, r1, r2, r3);
@@ -1748,11 +1469,6 @@ void Assembler::alrk(Register r1, Register r2, Register r3) {
// -------------------------------
// 64-bit Add Logical Instructions
// -------------------------------
-// Add Logical Register-Storage (64)
-void Assembler::alg(Register r1, const MemOperand& opnd) {
- rxy_form(ALG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Add Logical Register-Register-Register (64)
void Assembler::algrk(Register r1, Register r2, Register r3) {
rrf1_form(ALGRK, r1, r2, r3);
@@ -1761,26 +1477,6 @@ void Assembler::algrk(Register r1, Register r2, Register r3) {
// ----------------------------
// 32-bit Subtract Instructions
// ----------------------------
-// Subtract Register-Storage (32)
-void Assembler::s(Register r1, const MemOperand& opnd) {
- rx_form(S, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Register-Storage (32)
-void Assembler::sy(Register r1, const MemOperand& opnd) {
- rxy_form(SY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Halfword Register-Storage (32)
-void Assembler::sh(Register r1, const MemOperand& opnd) {
- rx_form(SH, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Halfword Register-Storage (32)
-void Assembler::shy(Register r1, const MemOperand& opnd) {
- rxy_form(SHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Subtract Register-Register-Register (32)
void Assembler::srk(Register r1, Register r2, Register r3) {
rrf1_form(SRK, r1, r2, r3);
@@ -1789,16 +1485,6 @@ void Assembler::srk(Register r1, Register r2, Register r3) {
// ----------------------------
// 64-bit Subtract Instructions
// ----------------------------
-// Subtract Register-Storage (64)
-void Assembler::sg(Register r1, const MemOperand& opnd) {
- rxy_form(SG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Register-Storage (64<-32)
-void Assembler::sgf(Register r1, const MemOperand& opnd) {
- rxy_form(SGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Subtract Register-Register-Register (64)
void Assembler::sgrk(Register r1, Register r2, Register r3) {
rrf1_form(SGRK, r1, r2, r3);
@@ -1807,16 +1493,6 @@ void Assembler::sgrk(Register r1, Register r2, Register r3) {
// ------------------------------------
// 32-bit Subtract Logical Instructions
// ------------------------------------
-// Subtract Logical Register-Storage (32)
-void Assembler::sl(Register r1, const MemOperand& opnd) {
- rx_form(SL, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Logical Register-Storage (32)
-void Assembler::sly(Register r1, const MemOperand& opnd) {
- rxy_form(SLY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Subtract Logical Register-Register-Register (32)
void Assembler::slrk(Register r1, Register r2, Register r3) {
rrf1_form(SLRK, r1, r2, r3);
@@ -1825,11 +1501,6 @@ void Assembler::slrk(Register r1, Register r2, Register r3) {
// ------------------------------------
// 64-bit Subtract Logical Instructions
// ------------------------------------
-// Subtract Logical Register-Storage (64)
-void Assembler::slg(Register r1, const MemOperand& opnd) {
- rxy_form(SLG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Subtract Logical Register-Register-Register (64)
void Assembler::slgrk(Register r1, Register r2, Register r3) {
rrf1_form(SLGRK, r1, r2, r3);
@@ -1838,42 +1509,6 @@ void Assembler::slgrk(Register r1, Register r2, Register r3) {
// ----------------------------
// 32-bit Multiply Instructions
// ----------------------------
-// Multiply Register-Storage (64<-32)
-void Assembler::m(Register r1, const MemOperand& opnd) {
- DCHECK(r1.code() % 2 == 0);
- rx_form(M, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-void Assembler::mfy(Register r1, const MemOperand& opnd) {
- DCHECK(r1.code() % 2 == 0);
- rxy_form(MFY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Logical Register-Storage (64<-32)
-void Assembler::ml(Register r1, const MemOperand& opnd) {
- rxy_form(ML, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Single Register-Storage (32)
-void Assembler::ms(Register r1, const MemOperand& opnd) {
- rx_form(MS, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Single Register-Storage (32)
-void Assembler::msy(Register r1, const MemOperand& opnd) {
- rxy_form(MSY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Halfword Register-Storage (32)
-void Assembler::mh(Register r1, const MemOperand& opnd) {
- rx_form(MH, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Halfword Register-Storage (32)
-void Assembler::mhy(Register r1, const MemOperand& opnd) {
- rxy_form(MHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Multiply Halfword Immediate (32)
void Assembler::mhi(Register r1, const Operand& opnd) {
ri_form(MHI, r1, opnd);
@@ -1892,107 +1527,39 @@ void Assembler::msgrkc(Register r1, Register r2, Register r3) {
// ----------------------------
// 64-bit Multiply Instructions
// ----------------------------
-// Multiply Logical Register-Storage (128<-64)
-void Assembler::mlg(Register r1, const MemOperand& opnd) {
- rxy_form(MLG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Multiply Halfword Immediate (64)
void Assembler::mghi(Register r1, const Operand& opnd) {
ri_form(MGHI, r1, opnd);
}
-// Multiply Single Register-Storage (64)
-void Assembler::msg(Register r1, const MemOperand& opnd) {
- rxy_form(MSG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// --------------------------
-// 32-bit Divide Instructions
-// --------------------------
-// Divide Register-Storage (32<-64)
-void Assembler::d(Register r1, const MemOperand& opnd) {
- rx_form(D, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Divide Logical Register-Storage (32<-64)
-void Assembler::dl(Register r1, const MemOperand& opnd) {
- rx_form(DL, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// --------------------
// Bitwise Instructions
// --------------------
-// AND Register-Storage (32)
-void Assembler::n(Register r1, const MemOperand& opnd) {
- rx_form(N, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// AND Register-Storage (32)
-void Assembler::ny(Register r1, const MemOperand& opnd) {
- rxy_form(NY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// AND Register-Register-Register (32)
void Assembler::nrk(Register r1, Register r2, Register r3) {
rrf1_form(NRK, r1, r2, r3);
}
-// AND Register-Storage (64)
-void Assembler::ng(Register r1, const MemOperand& opnd) {
- rxy_form(NG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// AND Register-Register-Register (64)
void Assembler::ngrk(Register r1, Register r2, Register r3) {
rrf1_form(NGRK, r1, r2, r3);
}
-// OR Register-Storage (32)
-void Assembler::o(Register r1, const MemOperand& opnd) {
- rx_form(O, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// OR Register-Storage (32)
-void Assembler::oy(Register r1, const MemOperand& opnd) {
- rxy_form(OY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// OR Register-Register-Register (32)
void Assembler::ork(Register r1, Register r2, Register r3) {
rrf1_form(ORK, r1, r2, r3);
}
-// OR Register-Storage (64)
-void Assembler::og(Register r1, const MemOperand& opnd) {
- rxy_form(OG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// OR Register-Register-Register (64)
void Assembler::ogrk(Register r1, Register r2, Register r3) {
rrf1_form(OGRK, r1, r2, r3);
}
-// XOR Register-Storage (32)
-void Assembler::x(Register r1, const MemOperand& opnd) {
- rx_form(X, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// XOR Register-Storage (32)
-void Assembler::xy(Register r1, const MemOperand& opnd) {
- rxy_form(XY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// XOR Register-Register-Register (32)
void Assembler::xrk(Register r1, Register r2, Register r3) {
rrf1_form(XRK, r1, r2, r3);
}
-// XOR Register-Storage (64)
-void Assembler::xg(Register r1, const MemOperand& opnd) {
- rxy_form(XG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// XOR Register-Register-Register (64)
void Assembler::xgrk(Register r1, Register r2, Register r3) {
rrf1_form(XGRK, r1, r2, r3);
@@ -2222,36 +1789,6 @@ void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
brcl(cond, Operand(target_index));
}
-// Store (32)
-void Assembler::st(Register src, const MemOperand& dst) {
- rx_form(ST, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Store (32)
-void Assembler::sty(Register src, const MemOperand& dst) {
- rxy_form(STY, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Store Halfword
-void Assembler::sth(Register src, const MemOperand& dst) {
- rx_form(STH, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Store Halfword
-void Assembler::sthy(Register src, const MemOperand& dst) {
- rxy_form(STHY, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Store Character
-void Assembler::stc(Register src, const MemOperand& dst) {
- rx_form(STC, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Store Character
-void Assembler::stcy(Register src, const MemOperand& dst) {
- rxy_form(STCY, src, dst.rx(), dst.rb(), dst.offset());
-}
-
// 32-bit Load Multiple - short displacement (12-bits unsigned)
void Assembler::lm(Register r1, Register r2, const MemOperand& src) {
rs_form(LM, r1, r2, src.rb(), src.offset());
@@ -2277,22 +1814,6 @@ void Assembler::mvghi(const MemOperand& opnd1, const Operand& i2) {
sil_form(MVGHI, opnd1.getBaseRegister(), opnd1.getDisplacement(), i2);
}
-// Store Register (64)
-void Assembler::stg(Register src, const MemOperand& dst) {
- DCHECK(!(dst.rb().code() == 15 && dst.offset() < 0));
- rxy_form(STG, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Insert Character
-void Assembler::ic_z(Register r1, const MemOperand& opnd) {
- rx_form(IC_z, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Insert Character
-void Assembler::icy(Register r1, const MemOperand& opnd) {
- rxy_form(ICY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
// Insert Immediate (high high)
void Assembler::iihh(Register r1, const Operand& opnd) {
ri_form(IIHH, r1, opnd);
@@ -2323,12 +1844,6 @@ void Assembler::adb(DoubleRegister r1, const MemOperand& opnd) {
opnd.offset());
}
-// Compare Register-Storage (LB)
-void Assembler::cdb(DoubleRegister r1, const MemOperand& opnd) {
- rx_form(CD, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
// Divide Register-Storage (LB)
void Assembler::ddb(DoubleRegister r1, const MemOperand& opnd) {
rxe_form(DDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
@@ -2347,56 +1862,20 @@ void Assembler::sdb(DoubleRegister r1, const MemOperand& opnd) {
opnd.offset());
}
-// Square Root (LB)
-void Assembler::sqdb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(SQDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+void Assembler::ceb(DoubleRegister r1, const MemOperand& opnd) {
+ rxe_form(CEB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
opnd.offset());
}
-// Store Double (64)
-void Assembler::std(DoubleRegister r1, const MemOperand& opnd) {
- rx_form(STD, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Store Double (64)
-void Assembler::stdy(DoubleRegister r1, const MemOperand& opnd) {
- DCHECK(!(opnd.rb().code() == 15 && opnd.offset() < 0));
- rxy_form(STDY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Store Float (32)
-void Assembler::ste(DoubleRegister r1, const MemOperand& opnd) {
- rx_form(STE, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Store Float (32)
-void Assembler::stey(DoubleRegister r1, const MemOperand& opnd) {
- DCHECK(!(opnd.rb().code() == 15 && opnd.offset() < 0));
- rxy_form(STEY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Double (64)
-void Assembler::ld(DoubleRegister r1, const MemOperand& opnd) {
- DCHECK(is_uint12(opnd.offset()));
- rx_form(LD, r1, opnd.rx(), opnd.rb(), opnd.offset() & 0xfff);
-}
-
-// Load Double (64)
-void Assembler::ldy(DoubleRegister r1, const MemOperand& opnd) {
- DCHECK(is_int20(opnd.offset()));
- rxy_form(LDY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Float (32)
-void Assembler::le_z(DoubleRegister r1, const MemOperand& opnd) {
- DCHECK(is_uint12(opnd.offset()));
- rx_form(LE, r1, opnd.rx(), opnd.rb(), opnd.offset() & 0xfff);
+void Assembler::cdb(DoubleRegister r1, const MemOperand& opnd) {
+ rxe_form(CDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+ opnd.offset());
}
-// Load Float (32)
-void Assembler::ley(DoubleRegister r1, const MemOperand& opnd) {
- DCHECK(is_int20(opnd.offset()));
- rxy_form(LEY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+// Square Root (LB)
+void Assembler::sqdb(DoubleRegister r1, const MemOperand& opnd) {
+ rxe_form(SQDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+ opnd.offset());
}
// Convert to Fixed point (64<-S)
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index 89a3182f1a..24146dfd05 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -187,6 +187,7 @@ const Register kRootRegister = r10; // Roots array pointer.
const Register cp = r13; // JavaScript context pointer.
static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
// Double word FP register.
struct DoubleRegister {
@@ -447,17 +448,10 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(static Address target_address_at(Address pc, Code* code)) {
- Address constant_pool = NULL;
- return target_address_at(pc, constant_pool);
- }
+ INLINE(static Address target_address_at(Address pc, Code* code));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- Address constant_pool = NULL;
- set_target_address_at(isolate, pc, constant_pool, target,
- icache_flush_mode);
- }
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -702,6 +696,70 @@ class Assembler : public AssemblerBase {
getfield<uint32_t, 4, 24, 28>(f3) | getfield<uint32_t, 4, 28, 32>(f4));
}
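The emitters added below lean on the getfield<T, size, lo, hi> helper visible in the context above. A self-contained sketch of its presumed behavior — the signature is inferred from the call sites, not copied from V8 — is: mask the value to hi - lo bits, then shift it into big-endian bit positions [lo, hi) of a size-byte instruction word.

    #include <cstdint>

    // Presumed semantics of getfield, inferred from its call sites.
    template <typename T, int kBytes, int kLo, int kHi>
    T getfield(T value) {
      const T mask = (T{1} << (kHi - kLo)) - 1;   // keep only the field's bits
      return (value & mask) << (kBytes * 8 - kHi);
    }

    int main() {
      // A 4-bit register code placed at big-endian bits 8-11 of a 4-byte word.
      return getfield<uint32_t, 4, 8, 12>(0xF) == 0x00F00000u ? 0 : 1;
    }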
+#define DECLARE_S390_RX_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1> \
+ inline void name(R1 r1, Register x2, Register b2, Disp d2) { \
+ rx_format(op_name, r1.code(), x2.code(), b2.code(), d2); \
+ } \
+ template <class R1> \
+ inline void name(R1 r1, const MemOperand& opnd) { \
+ name(r1, opnd.getIndexRegister(), \
+ opnd.getBaseRegister(), opnd.getDisplacement()); \
+ }
+
+ inline void rx_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+ DCHECK(is_uint8(opcode));
+ DCHECK(is_uint12(f4));
+ emit4bytes(getfield<uint32_t, 4, 0, 8>(opcode) |
+ getfield<uint32_t, 4, 8, 12>(f1) |
+ getfield<uint32_t, 4, 12, 16>(f2) |
+ getfield<uint32_t, 4, 16, 20>(f3) |
+ getfield<uint32_t, 4, 20, 32>(f4));
+ }
+ S390_RX_A_OPCODE_LIST(DECLARE_S390_RX_INSTRUCTIONS)
+
+ void bc(Condition cond, const MemOperand& opnd) {
+ bc(cond, opnd.getIndexRegister(),
+ opnd.getBaseRegister(), opnd.getDisplacement());
+ }
+ void bc(Condition cond, Register x2, Register b2, Disp d2) {
+ rx_format(BC, cond, x2.code(), b2.code(), d2);
+ }
+#undef DECLARE_S390_RX_INSTRUCTIONS
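To make the macro concrete: for a hypothetical list entry V(l, L, 0x58) — the name and opcode value here are assumptions for illustration, not quoted from the opcode list — DECLARE_S390_RX_INSTRUCTIONS expands to roughly the pair of overloads below. The template parameter lets Register and DoubleRegister forms share one definition; bc above needs a hand-written overload because its first operand is a condition mask, not a register.

    // Approximate preprocessor expansion for a hypothetical entry V(l, L, 0x58):
    template <class R1>
    inline void l(R1 r1, Register x2, Register b2, Disp d2) {
      rx_format(L, r1.code(), x2.code(), b2.code(), d2);
    }
    template <class R1>
    inline void l(R1 r1, const MemOperand& opnd) {
      l(r1, opnd.getIndexRegister(), opnd.getBaseRegister(),
        opnd.getDisplacement());
    }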
+
+#define DECLARE_S390_RXY_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1, class R2> \
+ inline void name(R1 r1, R2 r2, Register b2, Disp d2) { \
+ rxy_format(op_name, r1.code(), r2.code(), b2.code(), d2); \
+ } \
+ template <class R1> \
+ inline void name(R1 r1, const MemOperand& opnd) { \
+ name(r1, opnd.getIndexRegister(), \
+ opnd.getBaseRegister(), opnd.getDisplacement()); \
+ }
+
+ inline void rxy_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+ DCHECK(is_uint16(opcode));
+ DCHECK(is_int20(f4));
+ emit6bytes(getfield<uint64_t, 6, 0, 8>(opcode >> 8) |
+ getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 20>(f3) |
+ getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
+ getfield<uint64_t, 6, 32, 40>(f4 >> 12) |
+ getfield<uint64_t, 6, 40, 48>(opcode & 0x00ff));
+ }
+ S390_RXY_A_OPCODE_LIST(DECLARE_S390_RXY_INSTRUCTIONS)
+
+ void pfd(Condition cond, const MemOperand& opnd) {
+ pfd(cond, opnd.getIndexRegister(),
+ opnd.getBaseRegister(), opnd.getDisplacement());
+ }
+ void pfd(Condition cond, Register x2, Register b2, Disp d2) {
+ rxy_format(PFD, cond, x2.code(), b2.code(), d2);
+ }
+#undef DECLARE_S390_RXY_INSTRUCTIONS
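As with bc, pfd keeps a hand-written overload because its first operand is a condition mask. The rxy_format body above splits the signed 20-bit displacement into a low 12-bit DL2 field and a high 8-bit DH2 field on either side of the split opcode; here is a self-contained sketch of the same packing (the opcode value in the demo is an assumption for illustration):

    #include <cstdint>
    #include <cstdio>

    // Same bit layout as rxy_format above, written with explicit shifts:
    // | op hi 0-7 | R1 8-11 | X2 12-15 | B2 16-19 | DL2 20-31 | DH2 32-39 | op lo 40-47 |
    uint64_t encode_rxy(uint16_t op, int r1, int x2, int b2, int32_t d2) {
      return (uint64_t{op} >> 8) << 40 | uint64_t(r1 & 0xF) << 36 |
             uint64_t(x2 & 0xF) << 32 | uint64_t(b2 & 0xF) << 28 |
             uint64_t(d2 & 0x0FFF) << 16 |       // DL2: low 12 bits
             uint64_t((d2 >> 12) & 0xFF) << 8 |  // DH2: high 8 bits, sign kept
             uint64_t(op & 0xFF);
    }

    int main() {
      // A displacement of -4 splits into DL2 = 0xFFC and DH2 = 0xFF.
      printf("%012llx\n", (unsigned long long)encode_rxy(0xE358, 3, 0, 15, -4));
      return 0;
    }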
+
// Helper for unconditional branch to Label with update to save register
void b(Register r, Label* l) {
int32_t halfwords = branch_offset(l) / 2;
@@ -790,10 +848,6 @@ class Assembler : public AssemblerBase {
#define RR2_FORM(name) void name(Condition m1, Register r2)
-#define RX_FORM(name) \
- void name(Register r1, Register x2, Register b2, Disp d2); \
- void name(Register r1, const MemOperand& opnd)
-
#define RI1_FORM(name) void name(Register r, const Operand& i)
#define RI2_FORM(name) void name(Condition m, const Operand& i)
@@ -812,10 +866,6 @@ class Assembler : public AssemblerBase {
void name(Register r1, Register r3, const MemOperand& opnd); \
void name(Register r1, Register r3, Register b2, Register x2, Disp d2)
-#define RXY_FORM(name) \
- void name(Register r1, Register x2, Register b2, Disp d2); \
- void name(Register r1, const MemOperand& opnd)
-
#define RSI_FORM(name) void name(Register r1, Register r3, const Operand& i)
#define RIS_FORM(name) \
@@ -957,26 +1007,14 @@ class Assembler : public AssemblerBase {
}
// S390 instruction sets
- RX_FORM(bc);
- RX_FORM(cd);
- RXE_FORM(cdb);
- RXE_FORM(ceb);
RXE_FORM(ddb);
SS1_FORM(ed);
- RX_FORM(ex);
RRF2_FORM(fidbr);
- RX_FORM(ic_z);
- RXY_FORM(icy);
RI1_FORM(iihh);
RI1_FORM(iihl);
RI1_FORM(iilh);
RI1_FORM(iill);
- RX_FORM(le_z);
- RXY_FORM(ley);
RSY1_FORM(loc);
- RXY_FORM(lrv);
- RXY_FORM(lrvh);
- RXY_FORM(lrvg);
RXE_FORM(mdb);
SS4_FORM(mvck);
SSF_FORM(mvcos);
@@ -987,48 +1025,19 @@ class Assembler : public AssemblerBase {
RI1_FORM(nilh);
RI1_FORM(nill);
RI1_FORM(oill);
- RXY_FORM(pfd);
RXE_FORM(sdb);
- RXY_FORM(slgf);
RS1_FORM(srdl);
- RX_FORM(ste);
- RXY_FORM(stey);
- RXY_FORM(strv);
- RXY_FORM(strvh);
- RXY_FORM(strvg);
RI1_FORM(tmll);
SS1_FORM(tr);
S_FORM(ts);
// Load Address Instructions
- void la(Register r, const MemOperand& opnd);
- void lay(Register r, const MemOperand& opnd);
void larl(Register r, Label* l);
// Load Instructions
- void lb(Register r, const MemOperand& src);
- void lgb(Register r, const MemOperand& src);
- void lh(Register r, const MemOperand& src);
- void lhy(Register r, const MemOperand& src);
- void lgh(Register r, const MemOperand& src);
- void l(Register r, const MemOperand& src);
- void ly(Register r, const MemOperand& src);
- void lg(Register r, const MemOperand& src);
- void lgf(Register r, const MemOperand& src);
void lhi(Register r, const Operand& imm);
void lghi(Register r, const Operand& imm);
- // Load And Test Instructions
- void lt_z(Register r, const MemOperand& src);
- void ltg(Register r, const MemOperand& src);
-
- // Load Logical Instructions
- void llc(Register r, const MemOperand& src);
- void llgc(Register r, const MemOperand& src);
- void llgf(Register r, const MemOperand& src);
- void llh(Register r, const MemOperand& src);
- void llgh(Register r, const MemOperand& src);
-
// Load Multiple Instructions
void lm(Register r1, Register r2, const MemOperand& src);
void lmy(Register r1, Register r2, const MemOperand& src);
@@ -1041,13 +1050,6 @@ class Assembler : public AssemblerBase {
void locg(Condition m3, Register r1, const MemOperand& src);
// Store Instructions
- void st(Register r, const MemOperand& src);
- void stc(Register r, const MemOperand& src);
- void stcy(Register r, const MemOperand& src);
- void stg(Register r, const MemOperand& src);
- void sth(Register r, const MemOperand& src);
- void sthy(Register r, const MemOperand& src);
- void sty(Register r, const MemOperand& src);
// Store Multiple Instructions
void stm(Register r1, Register r2, const MemOperand& src);
@@ -1055,18 +1057,10 @@ class Assembler : public AssemblerBase {
void stmg(Register r1, Register r2, const MemOperand& src);
// Compare Instructions
- void c(Register r, const MemOperand& opnd);
- void cy(Register r, const MemOperand& opnd);
- void cg(Register r, const MemOperand& opnd);
- void ch(Register r, const MemOperand& opnd);
- void chy(Register r, const MemOperand& opnd);
void chi(Register r, const Operand& opnd);
void cghi(Register r, const Operand& opnd);
// Compare Logical Instructions
- void cl(Register r, const MemOperand& opnd);
- void cly(Register r, const MemOperand& opnd);
- void clg(Register r, const MemOperand& opnd);
void cli(const MemOperand& mem, const Operand& imm);
void cliy(const MemOperand& mem, const Operand& imm);
void clc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
@@ -1128,110 +1122,58 @@ class Assembler : public AssemblerBase {
void mvc(const MemOperand& opnd1, const MemOperand& opnd2, uint32_t length);
// Branch Instructions
- void bct(Register r, const MemOperand& opnd);
- void bctg(Register r, const MemOperand& opnd);
void bras(Register r, const Operand& opnd);
void brc(Condition c, const Operand& opnd);
void brct(Register r1, const Operand& opnd);
void brctg(Register r1, const Operand& opnd);
// 32-bit Add Instructions
- void a(Register r1, const MemOperand& opnd);
- void ay(Register r1, const MemOperand& opnd);
- void ah(Register r1, const MemOperand& opnd);
- void ahy(Register r1, const MemOperand& opnd);
void ahi(Register r1, const Operand& opnd);
void ahik(Register r1, Register r3, const Operand& opnd);
void ark(Register r1, Register r2, Register r3);
void asi(const MemOperand&, const Operand&);
// 64-bit Add Instructions
- void ag(Register r1, const MemOperand& opnd);
- void agf(Register r1, const MemOperand& opnd);
void aghi(Register r1, const Operand& opnd);
void aghik(Register r1, Register r3, const Operand& opnd);
void agrk(Register r1, Register r2, Register r3);
void agsi(const MemOperand&, const Operand&);
// 32-bit Add Logical Instructions
- void al_z(Register r1, const MemOperand& opnd);
- void aly(Register r1, const MemOperand& opnd);
void alrk(Register r1, Register r2, Register r3);
// 64-bit Add Logical Instructions
- void alg(Register r1, const MemOperand& opnd);
void algrk(Register r1, Register r2, Register r3);
// 32-bit Subtract Instructions
- void s(Register r1, const MemOperand& opnd);
- void sy(Register r1, const MemOperand& opnd);
- void sh(Register r1, const MemOperand& opnd);
- void shy(Register r1, const MemOperand& opnd);
void srk(Register r1, Register r2, Register r3);
// 64-bit Subtract Instructions
- void sg(Register r1, const MemOperand& opnd);
- void sgf(Register r1, const MemOperand& opnd);
void sgrk(Register r1, Register r2, Register r3);
// 32-bit Subtract Logical Instructions
- void sl(Register r1, const MemOperand& opnd);
- void sly(Register r1, const MemOperand& opnd);
void slrk(Register r1, Register r2, Register r3);
// 64-bit Subtract Logical Instructions
- void slg(Register r1, const MemOperand& opnd);
void slgrk(Register r1, Register r2, Register r3);
// 32-bit Multiply Instructions
- void m(Register r1, const MemOperand& opnd);
- void mfy(Register r1, const MemOperand& opnd);
- void ml(Register r1, const MemOperand& opnd);
- void ms(Register r1, const MemOperand& opnd);
- void msy(Register r1, const MemOperand& opnd);
- void mh(Register r1, const MemOperand& opnd);
- void mhy(Register r1, const MemOperand& opnd);
void mhi(Register r1, const Operand& opnd);
void msrkc(Register r1, Register r2, Register r3);
void msgrkc(Register r1, Register r2, Register r3);
// 64-bit Multiply Instructions
- void mlg(Register r1, const MemOperand& opnd);
void mghi(Register r1, const Operand& opnd);
- void msg(Register r1, const MemOperand& opnd);
-
- // 32-bit Divide Instructions
- void d(Register r1, const MemOperand& opnd);
- void dl(Register r1, const MemOperand& opnd);
// Bitwise Instructions (AND / OR / XOR)
- void n(Register r1, const MemOperand& opnd);
- void ny(Register r1, const MemOperand& opnd);
void nrk(Register r1, Register r2, Register r3);
- void ng(Register r1, const MemOperand& opnd);
void ngrk(Register r1, Register r2, Register r3);
- void o(Register r1, const MemOperand& opnd);
- void oy(Register r1, const MemOperand& opnd);
void ork(Register r1, Register r2, Register r3);
- void og(Register r1, const MemOperand& opnd);
void ogrk(Register r1, Register r2, Register r3);
- void x(Register r1, const MemOperand& opnd);
- void xy(Register r1, const MemOperand& opnd);
void xrk(Register r1, Register r2, Register r3);
- void xg(Register r1, const MemOperand& opnd);
void xgrk(Register r1, Register r2, Register r3);
void xc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
- // Floating Point Load / Store Instructions
- void ld(DoubleRegister r1, const MemOperand& opnd);
- void ldy(DoubleRegister r1, const MemOperand& opnd);
- void le_z(DoubleRegister r1, const MemOperand& opnd);
- void ley(DoubleRegister r1, const MemOperand& opnd);
- void std(DoubleRegister r1, const MemOperand& opnd);
- void stdy(DoubleRegister r1, const MemOperand& opnd);
- void ste(DoubleRegister r1, const MemOperand& opnd);
- void stey(DoubleRegister r1, const MemOperand& opnd);
-
// Floating <-> Fixed Point Conversion Instructions
void cdlfbr(Condition m3, Condition m4, DoubleRegister fltReg,
Register fixReg);
@@ -1257,6 +1199,7 @@ class Assembler : public AssemblerBase {
// Floating Point Compare Instructions
void cdb(DoubleRegister r1, const MemOperand& opnd);
+ void ceb(DoubleRegister r1, const MemOperand& opnd);
// Floating Point Arithmetic Instructions
void adb(DoubleRegister r1, const MemOperand& opnd);
@@ -1448,11 +1391,6 @@ class Assembler : public AssemblerBase {
inline void rr2_form(uint8_t op, Condition m1, Register r2);
- inline void rx_form(Opcode op, Register r1, Register x2, Register b2,
- Disp d2);
- inline void rx_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
- Disp d2);
-
inline void ri_form(Opcode op, Register r1, const Operand& i2);
inline void ri_form(Opcode op, Condition m1, const Operand& i2);
@@ -1492,13 +1430,6 @@ class Assembler : public AssemblerBase {
inline void rxf_form(Opcode op, Register r1, Register r3, Register b2,
Register x2, Disp d2);
- inline void rxy_form(Opcode op, Register r1, Register x2, Register b2,
- Disp d2);
- inline void rxy_form(Opcode op, Register r1, Condition m3, Register b2,
- Disp d2);
- inline void rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
- Disp d2);
-
inline void s_form(Opcode op, Register b1, Disp d2);
inline void si_form(Opcode op, const Operand& i2, Register b1, Disp d1);
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 851f34552c..52a8db1229 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -206,9 +206,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ CmpP(r6, Operand(SYMBOL_TYPE));
__ beq(slow);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
- __ beq(slow);
} else {
__ CompareObjectType(r2, r6, r6, HEAP_NUMBER_TYPE);
__ beq(&heap_number);
@@ -219,9 +216,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ CmpP(r6, Operand(SYMBOL_TYPE));
__ beq(slow);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
- __ beq(slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -1086,9 +1080,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Push a bad frame pointer to fail if it is used.
__ LoadImmP(r10, Operand(-1));
- int marker = type();
- __ LoadSmiLiteral(r9, Smi::FromInt(marker));
- __ LoadSmiLiteral(r8, Smi::FromInt(marker));
+ StackFrame::Type marker = type();
+ __ Load(r9, Operand(StackFrame::TypeToMarker(marker)));
+ __ Load(r8, Operand(StackFrame::TypeToMarker(marker)));
// Save copies of the top frame descriptor on the stack.
__ mov(r7, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ LoadP(r7, MemOperand(r7));
@@ -1106,11 +1100,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ LoadAndTestP(r8, MemOperand(r7));
__ bne(&non_outermost_js, Label::kNear);
__ StoreP(fp, MemOperand(r7));
- __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Load(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont, Label::kNear);
__ bind(&non_outermost_js);
- __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+ __ Load(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
__ StoreP(ip, MemOperand(sp)); // frame-type
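These hunks reflect the wider frame-marker change in this update: markers pushed on the stack are now raw 32-bit values rather than Smi-tagged frame types, so they can be materialized with Load and compared with CmpP. A hedged sketch of the scheme — the real definition lives in src/frames.h, and the exact encoding here is an assumption:

    // Assumed shape of StackFrame::TypeToMarker: shift the type and set the low
    // bit, so a marker cannot be mistaken for an aligned frame pointer and fits
    // in 32 bits for plain integer compares.
    static int32_t TypeToMarker(int type) {
      return (type << 1) | 1;
    }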
@@ -1177,7 +1171,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(r7);
- __ CmpSmiLiteral(r7, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
+ __ CmpP(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ bne(&non_outermost_js_2, Label::kNear);
__ mov(r8, Operand::Zero());
__ mov(r7, Operand(ExternalReference(js_entry_sp)));
@@ -1219,52 +1213,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ b(r14);
}
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver = LoadDescriptor::ReceiverRegister();
- // Ensure that the vector and slot registers won't be clobbered before
- // calling the miss handler.
- DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::VectorRegister(),
- LoadWithVectorDescriptor::SlotRegister()));
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6,
- r7, &miss);
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
- // Return address is in lr.
- Label miss;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register index = LoadDescriptor::NameRegister();
- Register scratch = r7;
- Register result = r2;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
- result.is(LoadWithVectorDescriptor::SlotRegister()));
-
- // StringCharAtGenerator doesn't use the result register until it's passed
- // the different miss possibilities. If it did, we would have a conflict
- // when FLAG_vector_ics is true.
- StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- RECEIVER_IS_STRING);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
  // time or if regexp entry in generated code is turned off by a runtime switch or
@@ -1370,7 +1318,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
- // (8) Sliced string. Replace subject with parent. Go to (1).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
@@ -1382,7 +1330,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (1) Sequential string? If yes, go to (4).
STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
- kShortExternalStringMask) == 0x93);
+ kShortExternalStringMask) == 0xa7);
__ mov(r3, Operand(kIsNotStringMask | kStringRepresentationMask |
kShortExternalStringMask));
__ AndP(r3, r2);
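The mask constant moves from 0x93 to 0xa7 because the thin-string representation tag widens kStringRepresentationMask by one bit and shifts the other shape bits up (the same renumbering moves kOneByteStringTag and kStringEncodingMask from 4 to 8 in the hunk below). A quick self-contained check, with the bit values assumed rather than quoted from objects.h:

    #include <cassert>

    int main() {
      // Assumed post-thin-string shape bits (old values in parentheses):
      const unsigned kIsNotStringMask = 0x80;           // unchanged
      const unsigned kStringRepresentationMask = 0x07;  // (was 0x03)
      const unsigned kShortExternalStringMask = 0x20;   // (was 0x10)
      assert((kIsNotStringMask | kStringRepresentationMask |
              kShortExternalStringMask) == 0xa7);
      // Old layout: 0x80 | 0x03 | 0x10 == 0x93, matching the replaced assert.
      return 0;
    }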
@@ -1392,6 +1340,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
STATIC_ASSERT(kExternalStringTag < 0xffffu);
@@ -1420,9 +1369,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ble(&runtime);
__ SmiUntag(r3);
- STATIC_ASSERT(4 == kOneByteStringTag);
+ STATIC_ASSERT(8 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
- STATIC_ASSERT(kStringEncodingMask == 4);
+ STATIC_ASSERT(kStringEncodingMask == 8);
__ ExtractBitMask(r5, r2, kStringEncodingMask, SetRC);
__ beq(&encoding_type_UC16, Label::kNear);
__ LoadP(code,
@@ -1679,12 +1628,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ AndP(r0, r3);
__ bne(&runtime);
- // (8) Sliced string. Replace subject with parent. Go to (4).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (4).
+ Label thin_string;
+ __ CmpP(r3, Operand(kThinStringTag));
+ __ beq(&thin_string);
// Load offset into ip and replace subject string with parent.
__ LoadP(ip, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ SmiUntag(ip);
__ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ b(&check_underlying); // Go to (4).
+
+ __ bind(&thin_string);
+ __ LoadP(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+ __ b(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
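The added case keeps the loop shape intact: a thin string forwards to its actual string exactly as a sliced string forwards to its parent, and control returns to check (1). A standalone analogue of that unwrapping loop (the types are stand-ins, not V8's heap objects):

    #include <string>

    // Stand-in types: sliced and thin strings are both one-field wrappers.
    struct Str {
      enum Kind { kSeq, kSliced, kThin } kind;
      const Str* wrapped;   // parent for sliced, actual for thin
      std::string payload;  // only meaningful for kSeq
    };

    const Str* UnwrapForRegExp(const Str* s) {
      // Mirrors the jump back to check_underlying: keep unwrapping until a
      // directly matchable (sequential) string is reached.
      while (s->kind != Str::kSeq) s = s->wrapped;
      return s;
    }

    int main() {
      Str seq{Str::kSeq, nullptr, "abc"};
      Str thin{Str::kThin, &seq, ""};
      return UnwrapForRegExp(&thin) == &seq ? 0 : 1;
    }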
@@ -1852,188 +1808,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
- Register slot, Register temp) {
- const int count_offset = FixedArray::kHeaderSize + kPointerSize;
- __ SmiToPtrArrayOffset(temp, slot);
- __ AddP(feedback_vector, feedback_vector, temp);
- __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset));
- __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp);
- __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp);
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
- // r2 - number of arguments
- // r3 - function
- // r5 - slot id
- // r4 - vector
- // r6 - allocation site (loaded from vector[slot])
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
- __ CmpP(r3, r7);
- __ bne(miss);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, r4, r5, r1);
-
- __ LoadRR(r4, r6);
- __ LoadRR(r5, r3);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void CallICStub::Generate(MacroAssembler* masm) {
- // r2 - number of arguments
- // r3 - function
- // r5 - slot id (Smi)
- // r4 - vector
- Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
- // The checks. First, does r3 match the recorded monomorphic target?
- __ SmiToPtrArrayOffset(r8, r5);
- __ AddP(r8, r4, r8);
- __ LoadP(r6, FieldMemOperand(r8, FixedArray::kHeaderSize));
-
- // We don't know that we have a weak cell. We might have a private symbol
- // or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
- // computed, meaning that it can't appear to be a pointer. If the low bit is
- // 0, then hash is computed, but the 0 bit prevents the field from appearing
- // to be a pointer.
- STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
- WeakCell::kValueOffset &&
- WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
- __ LoadP(r7, FieldMemOperand(r6, WeakCell::kValueOffset));
- __ CmpP(r3, r7);
- __ bne(&extra_checks_or_miss, Label::kNear);
-
- // The compare above could have been a SMI/SMI comparison. Guard against this
- // convincing us that we have a monomorphic JSFunction.
- __ JumpIfSmi(r3, &extra_checks_or_miss);
-
- __ bind(&call_function);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, r4, r5, r1);
-
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
- tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&extra_checks_or_miss);
- Label uninitialized, miss, not_allocation_site;
-
- __ CompareRoot(r6, Heap::kmegamorphic_symbolRootIndex);
- __ beq(&call);
-
- // Verify that r6 contains an AllocationSite
- __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
- __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
- __ bne(&not_allocation_site);
-
- // We have an allocation site.
- HandleArrayCase(masm, &miss);
-
- __ bind(&not_allocation_site);
-
- // The following cases attempt to handle MISS cases without going to the
- // runtime.
- if (FLAG_trace_ic) {
- __ b(&miss);
- }
-
- __ CompareRoot(r6, Heap::kuninitialized_symbolRootIndex);
- __ beq(&uninitialized);
-
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(r6);
- __ CompareObjectType(r6, r7, r7, JS_FUNCTION_TYPE);
- __ bne(&miss);
- __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
-
- __ bind(&call);
-
- // Increment the call count for megamorphic function calls.
- IncrementCallCount(masm, r4, r5, r1);
-
- __ bind(&call_count_incremented);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&uninitialized);
-
- // We are going monomorphic, provided we actually have a JSFunction.
- __ JumpIfSmi(r3, &miss);
-
- // Goto miss case if we do not have a function.
- __ CompareObjectType(r3, r6, r6, JS_FUNCTION_TYPE);
- __ bne(&miss);
-
- // Make sure the function is not the Array() function, which requires special
- // behavior on MISS.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r6);
- __ CmpP(r3, r6);
- __ beq(&miss);
-
- // Make sure the function belongs to the same native context.
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kContextOffset));
- __ LoadP(r6, ContextMemOperand(r6, Context::NATIVE_CONTEXT_INDEX));
- __ LoadP(ip, NativeContextMemOperand());
- __ CmpP(r6, ip);
- __ bne(&miss);
-
- // Store the function. Use a stub since we need a frame for allocation.
- // r4 - vector
- // r5 - slot
- // r3 - function
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateWeakCellStub create_stub(masm->isolate());
- __ SmiTag(r2);
- __ Push(r2, r4, r5, cp, r3);
- __ CallStub(&create_stub);
- __ Pop(r4, r5, cp, r3);
- __ Pop(r2);
- __ SmiUntag(r2);
- }
-
- __ b(&call_function);
-
- // We are here because tracing is on or we encountered a MISS case we can't
- // handle here.
- __ bind(&miss);
- GenerateMiss(masm);
-
- __ b(&call_count_incremented);
-}
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the number of arguments as Smi.
- __ SmiTag(r2);
-
- // Push the receiver and the function and feedback info.
- __ Push(r2, r3, r4, r5);
-
- // Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss);
-
- // Move result to r3 and exit the internal frame.
- __ LoadRR(r3, r2);
-
- // Restore number of arguments.
- __ Pop(r2);
- __ SmiUntag(r2);
-}
-
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
@@ -2119,44 +1893,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
- __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
- __ OrP(r0, r0, Operand(kSmiTagMask));
- __ AndP(r0, code_, r0);
- __ bne(&slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged one-byte char code.
- __ LoadRR(r0, code_);
- __ SmiToPtrArrayOffset(code_, code_);
- __ AddP(result_, code_);
- __ LoadRR(code_, r0);
- __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ beq(&slow_case_);
- __ bind(&exit_);
-}
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode);
- __ Move(result_, r2);
- call_helper.AfterCall(masm);
- __ b(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3015,12 +2751,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadFeedbackVector(r4);
- CallICStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
@@ -3396,539 +3126,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r3);
-
- // Make r4 point to the JavaScript frame.
- __ LoadRR(r4, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
- __ CmpP(ip, r3);
- __ b(&ok, Label::kNear);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have rest parameters (only possible if we have an
- // arguments adaptor frame below the function frame).
- Label no_rest_parameters;
- __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ CmpP(ip, r0);
- __ bne(&no_rest_parameters);
-
- // Check if the arguments adaptor frame contains more arguments than
- // specified by the function's internal formal parameter count.
- Label rest_parameters;
- __ LoadP(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ LoadP(r5, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadW(
- r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_S390X
- __ SmiTag(r5);
-#endif
- __ SubP(r2, r2, r5);
- __ bgt(&rest_parameters);
-
- // Return an empty rest parameter array.
- __ bind(&no_rest_parameters);
- {
- // ----------- S t a t e -------------
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Allocate an empty rest parameter array.
- Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
-    // Set up the rest parameter array in r2.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
- __ StoreP(r3, FieldMemOperand(r2, JSArray::kMapOffset), r0);
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r3, FieldMemOperand(r2, JSArray::kPropertiesOffset), r0);
- __ StoreP(r3, FieldMemOperand(r2, JSArray::kElementsOffset), r0);
- __ LoadImmP(r3, Operand::Zero());
- __ StoreP(r3, FieldMemOperand(r2, JSArray::kLengthOffset), r0);
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(Smi::FromInt(JSArray::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- }
- __ b(&done_allocate);
- }
-
- __ bind(&rest_parameters);
- {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
- __ SmiToPtrArrayOffset(r8, r2);
- __ AddP(r4, r4, r8);
- __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- r2 : number of rest parameters (tagged)
- // -- r3 : function
- // -- r4 : pointer just past first rest parameters
- // -- r8 : size of rest parameters
- // -- lr : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ mov(r9, Operand(JSArray::kSize + FixedArray::kHeaderSize));
- __ AddP(r9, r9, r8);
- __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
-    // Set up the elements array in r5.
- __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
- __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
- __ AddP(r6, r5,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- {
- Label loop;
- __ SmiUntag(r1, r2);
- // __ mtctr(r0);
- __ bind(&loop);
- __ lay(r4, MemOperand(r4, -kPointerSize));
- __ LoadP(ip, MemOperand(r4));
- __ la(r6, MemOperand(r6, kPointerSize));
- __ StoreP(ip, MemOperand(r6));
- // __ bdnz(&loop);
- __ BranchOnCount(r1, &loop);
- __ AddP(r6, r6, Operand(kPointerSize));
- }
-
-    // Set up the rest parameter array in r6.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
- __ StoreP(r3, MemOperand(r6, JSArray::kMapOffset));
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r3, MemOperand(r6, JSArray::kPropertiesOffset));
- __ StoreP(r5, MemOperand(r6, JSArray::kElementsOffset));
- __ StoreP(r2, MemOperand(r6, JSArray::kLengthOffset));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ AddP(r2, r6, Operand(kHeapObjectTag));
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ CmpP(r9, Operand(kMaxRegularHeapObjectSize));
- __ bgt(&too_big_for_new_space);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r9);
- __ Push(r2, r4, r9);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ LoadRR(r5, r2);
- __ Pop(r2, r4);
- }
- __ b(&done_allocate);
-
- // Fall back to %NewRestParameter.
- __ bind(&too_big_for_new_space);
- __ push(r3);
- __ TailCallRuntime(Runtime::kNewRestParameter);
- }
-}
-
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r3);
-
- // Make r9 point to the JavaScript frame.
- __ LoadRR(r9, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ LoadP(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ LoadP(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
- __ CmpP(ip, r3);
- __ beq(&ok, Label::kNear);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadW(
- r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_S390X
- __ SmiTag(r4);
-#endif
- __ SmiToPtrArrayOffset(r5, r4);
- __ AddP(r5, r9, r5);
- __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // r3 : function
- // r4 : number of parameters (tagged)
- // r5 : parameters pointer
- // r9 : JavaScript frame pointer
- // Registers used over whole function:
- // r7 : arguments count (tagged)
- // r8 : mapped parameter count (tagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ LoadP(r6, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ CmpP(r2, r0);
- __ beq(&adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ LoadRR(r7, r4);
- __ LoadRR(r8, r4);
- __ b(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r5, r7);
- __ AddP(r5, r5, r6);
- __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // r7 = argument count (tagged)
- // r8 = parameter count (tagged)
- // Compute the mapped parameter count = min(r4, r7) in r8.
- __ CmpP(r4, r7);
- Label skip;
- __ LoadRR(r8, r4);
- __ blt(&skip);
- __ LoadRR(r8, r7);
- __ bind(&skip);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, which has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- __ CmpSmiLiteral(r8, Smi::kZero, r0);
- Label skip2, skip3;
- __ bne(&skip2);
- __ LoadImmP(r1, Operand::Zero());
- __ b(&skip3);
- __ bind(&skip2);
- __ SmiToPtrArrayOffset(r1, r8);
- __ AddP(r1, r1, Operand(kParameterMapHeaderSize));
- __ bind(&skip3);
-
- // 2. Backing store.
- __ SmiToPtrArrayOffset(r6, r7);
- __ AddP(r1, r1, r6);
- __ AddP(r1, r1, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ AddP(r1, r1, Operand(JSSloppyArgumentsObject::kSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(r1, r2, r1, r6, &runtime, NO_ALLOCATION_FLAGS);
-
- // r2 = address of new object(s) (tagged)
- // r4 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into r3.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ LoadP(r6, NativeContextMemOperand());
- __ CmpP(r8, Operand::Zero());
- Label skip4, skip5;
- __ bne(&skip4);
- __ LoadP(r6, MemOperand(r6, kNormalOffset));
- __ b(&skip5);
- __ bind(&skip4);
- __ LoadP(r6, MemOperand(r6, kAliasedOffset));
- __ bind(&skip5);
-
- // r2 = address of new object (tagged)
- // r4 = argument count (smi-tagged)
- // r6 = address of arguments map (tagged)
- // r8 = mapped parameter count (tagged)
- __ StoreP(r6, FieldMemOperand(r2, JSObject::kMapOffset), r0);
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r1, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
- __ StoreP(r1, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
-
- // Set up the callee in-object property.
- __ AssertNotSmi(r3);
- __ StoreP(r3, FieldMemOperand(r2, JSSloppyArgumentsObject::kCalleeOffset),
- r0);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(r7);
- __ StoreP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset),
- r0);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, r6 will point there, otherwise
- // it will point to the backing store.
- __ AddP(r6, r2, Operand(JSSloppyArgumentsObject::kSize));
- __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
-
- // r2 = address of new object (tagged)
- // r4 = argument count (tagged)
- // r6 = address of parameter map or backing store (tagged)
- // r8 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ CmpSmiLiteral(r8, Smi::kZero, r0);
- Label skip6;
- __ bne(&skip6);
- // Move backing store address to r3, because it is
- // expected there when filling in the unmapped arguments.
- __ LoadRR(r3, r6);
- __ b(&skip_parameter_map);
- __ bind(&skip6);
-
- __ LoadRoot(r7, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ StoreP(r7, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
- __ AddSmiLiteral(r7, r8, Smi::FromInt(2), r0);
- __ StoreP(r7, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
- __ StoreP(cp, FieldMemOperand(r6, FixedArray::kHeaderSize + 0 * kPointerSize),
- r0);
- __ SmiToPtrArrayOffset(r7, r8);
- __ AddP(r7, r7, r6);
- __ AddP(r7, r7, Operand(kParameterMapHeaderSize));
- __ StoreP(r7, FieldMemOperand(r6, FixedArray::kHeaderSize + 1 * kPointerSize),
- r0);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left (see the C++ sketch after this stub).
- Label parameters_loop;
- __ LoadRR(r7, r8);
- __ AddSmiLiteral(r1, r4, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
- __ SubP(r1, r1, r8);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ SmiToPtrArrayOffset(r3, r7);
- __ AddP(r3, r3, r6);
- __ AddP(r3, r3, Operand(kParameterMapHeaderSize));
-
- // r3 = address of backing store (tagged)
- // r6 = address of parameter map (tagged)
- // r7 = temporary scratch (among others, for address calculation)
- // r9 = temporary scratch (among others, for address calculation)
- // ip = the hole value
- __ SmiUntag(r7);
- __ push(r4);
- __ LoadRR(r4, r7);
- __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
- __ AddP(r9, r3, r7);
- __ AddP(r7, r6, r7);
- __ AddP(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(r7, r7, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-
- __ bind(&parameters_loop);
- __ StoreP(r1, MemOperand(r7, -kPointerSize));
- __ lay(r7, MemOperand(r7, -kPointerSize));
- __ StoreP(ip, MemOperand(r9, -kPointerSize));
- __ lay(r9, MemOperand(r9, -kPointerSize));
- __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
- __ BranchOnCount(r4, &parameters_loop);
- __ pop(r4);
-
- // Restore r7 = argument count (tagged).
- __ LoadP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // r2 = address of new object (tagged)
- // r3 = address of backing store (tagged)
- // r7 = argument count (tagged)
- // r8 = mapped parameter count (tagged)
- // r1 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r1, FieldMemOperand(r3, FixedArray::kMapOffset), r0);
- __ StoreP(r7, FieldMemOperand(r3, FixedArray::kLengthOffset), r0);
- __ SubP(r1, r7, r8);
- __ Ret(eq);
-
- Label arguments_loop;
- __ SmiUntag(r1);
- __ LoadRR(r4, r1);
-
- __ SmiToPtrArrayOffset(r0, r8);
- __ SubP(r5, r5, r0);
- __ AddP(r1, r3, r0);
- __ AddP(r1, r1,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-
- __ bind(&arguments_loop);
- __ LoadP(r6, MemOperand(r5, -kPointerSize));
- __ lay(r5, MemOperand(r5, -kPointerSize));
- __ StoreP(r6, MemOperand(r1, kPointerSize));
- __ la(r1, MemOperand(r1, kPointerSize));
- __ BranchOnCount(r4, &arguments_loop);
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // r7 = argument count (tagged)
- __ bind(&runtime);
- __ Push(r3, r5, r7);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
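The right-to-left parameter-map loop above is the densest part of this stub. The index arithmetic it performs, sketched in C++ with invented names (this is not V8 code):

    // Entries are written from the end of the parameter map backwards while
    // the context slot index counts up, so parameter 0 receives the highest
    // slot, MIN_CONTEXT_SLOTS + parameter_count - 1.
    void FillParameterMap(int parameter_count, int mapped_count,
                          int min_context_slots, int* map_entries) {
      int slot = min_context_slots + parameter_count - mapped_count;
      for (int i = mapped_count - 1; i >= 0; --i) {
        map_entries[i] = slot++;  // context slot index for parameter i
      }
    }
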
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r3);
-
- // Make r4 point to the JavaScript frame.
- __ LoadRR(r4, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
- __ CmpP(ip, r3);
- __ beq(&ok, Label::kNear);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ LoadP(r5, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ CmpP(ip, r0);
- __ beq(&arguments_adaptor);
- {
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadW(r2, FieldMemOperand(
- r6, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_S390X
- __ SmiTag(r2);
-#endif
- __ SmiToPtrArrayOffset(r8, r2);
- __ AddP(r4, r4, r8);
- }
- __ b(&arguments_done);
- __ bind(&arguments_adaptor);
- {
- __ LoadP(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r8, r2);
- __ AddP(r4, r5, r8);
- }
- __ bind(&arguments_done);
- __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- r2 : number of arguments (tagged)
- // -- r3 : function
- // -- r4 : pointer just past the first argument
- // -- r8 : size of the arguments
- // -- lr : return address
- // -----------------------------------
-
- // Allocate space for the strict arguments object plus the backing store.
- Label allocate, done_allocate;
- __ mov(r9, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ AddP(r9, r9, r8);
- __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the elements array in r5.
- __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
- __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
- __ AddP(r6, r5,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- {
- Label loop, done_loop;
- __ SmiUntag(r1, r2);
- __ LoadAndTestP(r1, r1);
- __ beq(&done_loop);
- __ bind(&loop);
- __ lay(r4, MemOperand(r4, -kPointerSize));
- __ LoadP(ip, MemOperand(r4));
- __ la(r6, MemOperand(r6, kPointerSize));
- __ StoreP(ip, MemOperand(r6));
- __ BranchOnCount(r1, &loop);
- __ bind(&done_loop);
- __ AddP(r6, r6, Operand(kPointerSize));
- }
-
- // Setup the strict arguments object in r6.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r3);
- __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kMapOffset));
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kPropertiesOffset));
- __ StoreP(r5, MemOperand(r6, JSStrictArgumentsObject::kElementsOffset));
- __ StoreP(r2, MemOperand(r6, JSStrictArgumentsObject::kLengthOffset));
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
- __ AddP(r2, r6, Operand(kHeapObjectTag));
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ CmpP(r9, Operand(kMaxRegularHeapObjectSize));
- __ bgt(&too_big_for_new_space);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r9);
- __ Push(r2, r4, r9);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ LoadRR(r5, r2);
- __ Pop(r2, r4);
- }
- __ b(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ bind(&too_big_for_new_space);
- __ push(r3);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
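All three removed stubs share one allocation strategy: inline new-space allocation first, the %AllocateInNewSpace runtime call while the object still fits a regular heap object, and the full runtime fallback otherwise. A hedged sketch of that decision (enum and signature invented):

    enum class AllocPath { kInline, kNewSpaceRuntime, kSlowRuntime };

    // max_regular_size stands in for V8's kMaxRegularHeapObjectSize limit.
    AllocPath ChooseAllocationPath(int size_in_bytes, bool inline_failed,
                                   int max_regular_size) {
      if (!inline_failed) return AllocPath::kInline;
      if (size_in_bytes <= max_regular_size)
        return AllocPath::kNewSpaceRuntime;  // %AllocateInNewSpace
      return AllocPath::kSlowRuntime;        // %NewStrictArguments etc.
    }
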
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index 02cc8c206c..6b84200510 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -70,6 +70,9 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
Register index, Register result,
Label* call_runtime) {
+ Label indirect_string_loaded;
+ __ bind(&indirect_string_loaded);
+
// Fetch the instance type of the receiver into result register.
__ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -81,19 +84,25 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
__ beq(&check_sequential, Label::kNear /*, cr0*/);
// Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ mov(ip, Operand(kSlicedNotConsMask));
- __ LoadRR(r0, result);
- __ AndP(r0, ip /*, SetRC*/); // Should be okay to remove RC
- __ beq(&cons_string, Label::kNear /*, cr0*/);
+ Label cons_string, thin_string;
+ __ LoadRR(ip, result);
+ __ nilf(ip, Operand(kStringRepresentationMask));
+ __ CmpP(ip, Operand(kConsStringTag));
+ __ beq(&cons_string);
+ __ CmpP(ip, Operand(kThinStringTag));
+ __ beq(&thin_string);
// Handle slices.
- Label indirect_string_loaded;
__ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ SmiUntag(ip, result);
__ AddP(index, ip);
- __ b(&indirect_string_loaded, Label::kNear);
+ __ b(&indirect_string_loaded);
+
+ // Handle thin strings.
+ __ bind(&thin_string);
+ __ LoadP(string, FieldMemOperand(string, ThinString::kActualOffset));
+ __ b(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
@@ -106,10 +115,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
__ bne(call_runtime);
// Get the first of the two strings and load its instance type.
__ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ b(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
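The reworked dispatch distinguishes cons, sliced, and the newly added thin strings by representation tag, then loops back to indirect_string_loaded until a direct string remains. A rough C++ rendering of that control flow (types and names invented, and the cons-string runtime bailout simplified away):

    enum StringRep { kSeq, kCons, kSliced, kThin, kExternal };
    struct Str { StringRep rep; Str* target; int slice_offset; };

    // Peel indirections the way the indirect_string_loaded loop does.
    Str* Unwrap(Str* s, int* index) {
      while (s->rep == kCons || s->rep == kSliced || s->rep == kThin) {
        if (s->rep == kSliced) *index += s->slice_offset;  // adjust char index
        s = s->target;  // cons: first child; sliced: parent; thin: actual
      }
      return s;
    }
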
diff --git a/deps/v8/src/s390/constants-s390.h b/deps/v8/src/s390/constants-s390.h
index c1cace2634..59bf34bbdd 100644
--- a/deps/v8/src/s390/constants-s390.h
+++ b/deps/v8/src/s390/constants-s390.h
@@ -869,7 +869,7 @@ typedef uint64_t SixByteInstr;
V(dsg, DSG, 0xE30D) /* type = RXY_A DIVIDE SINGLE (64) */ \
V(cvbg, CVBG, 0xE30E) /* type = RXY_A CONVERT TO BINARY (64) */ \
V(lrvg, LRVG, 0xE30F) /* type = RXY_A LOAD REVERSED (64) */ \
- V(lt, LT, 0xE312) /* type = RXY_A LOAD AND TEST (32) */ \
+ V(lt_z, LT, 0xE312) /* type = RXY_A LOAD AND TEST (32) */ \
V(lray, LRAY, 0xE313) /* type = RXY_A LOAD REAL ADDRESS (32) */ \
V(lgf, LGF, 0xE314) /* type = RXY_A LOAD (64<-32) */ \
V(lgh, LGH, 0xE315) /* type = RXY_A LOAD HALFWORD (64<-16) */ \
@@ -1144,7 +1144,7 @@ typedef uint64_t SixByteInstr;
V(ex, EX, 0x44) /* type = RX_A EXECUTE */ \
V(bal, BAL, 0x45) /* type = RX_A BRANCH AND LINK */ \
V(bct, BCT, 0x46) /* type = RX_A BRANCH ON COUNT (32) */ \
- V(bc, BC, 0x47) /* type = RX_A BRANCH ON CONDITION */ \
+ V(lh, LH, 0x48) /* type = RX_A LOAD HALFWORD (32<-16) */ \
V(ch, CH, 0x49) /* type = RX_A COMPARE HALFWORD (32<-16) */ \
V(ah, AH, 0x4A) /* type = RX_A ADD HALFWORD (32<-16) */ \
V(sh, SH, 0x4B) /* type = RX_A SUBTRACT HALFWORD (32<-16) */ \
@@ -1164,7 +1164,7 @@ typedef uint64_t SixByteInstr;
V(s, S, 0x5B) /* type = RX_A SUBTRACT (32) */ \
V(m, M, 0x5C) /* type = RX_A MULTIPLY (64<-32) */ \
V(d, D, 0x5D) /* type = RX_A DIVIDE (32<-64) */ \
- V(al, AL, 0x5E) /* type = RX_A ADD LOGICAL (32) */ \
+ V(al_z, AL, 0x5E) /* type = RX_A ADD LOGICAL (32) */ \
V(sl, SL, 0x5F) /* type = RX_A SUBTRACT LOGICAL (32) */ \
V(std, STD, 0x60) /* type = RX_A STORE (long) */ \
V(mxd, MXD, 0x67) /* type = RX_A MULTIPLY (long to extended HFP) */ \
@@ -1178,7 +1178,7 @@ typedef uint64_t SixByteInstr;
V(sw, SW, 0x6F) /* type = RX_A SUBTRACT UNNORMALIZED (long HFP) */ \
V(ste, STE, 0x70) /* type = RX_A STORE (short) */ \
V(ms, MS, 0x71) /* type = RX_A MULTIPLY SINGLE (32) */ \
- V(le, LE, 0x78) /* type = RX_A LOAD (short) */ \
+ V(le_z, LE, 0x78) /* type = RX_A LOAD (short) */ \
V(ce, CE, 0x79) /* type = RX_A COMPARE (short HFP) */ \
V(ae, AE, 0x7A) /* type = RX_A ADD NORMALIZED (short HFP) */ \
V(se, SE, 0x7B) /* type = RX_A SUBTRACT NORMALIZED (short HFP) */ \
@@ -1188,10 +1188,11 @@ typedef uint64_t SixByteInstr;
V(au, AU, 0x7E) /* type = RX_A ADD UNNORMALIZED (short HFP) */ \
V(su, SU, 0x7F) /* type = RX_A SUBTRACT UNNORMALIZED (short HFP) */ \
V(ssm, SSM, 0x80) /* type = RX_A SET SYSTEM MASK */ \
- V(lra, LRA, 0xB1) /* type = RX_A LOAD REAL ADDRESS (32) */
+ V(lra, LRA, 0xB1) /* type = RX_A LOAD REAL ADDRESS (32) */ \
+ V(sth, STH, 0x40) /* type = RX_A STORE HALFWORD (16) */
#define S390_RX_B_OPCODE_LIST(V) \
- V(lh, LH, 0x48) /* type = RX_B LOAD HALFWORD (32<-16) */
+ V(bc, BC, 0x47) /* type = RX_B BRANCH ON CONDITION */
#define S390_RIE_A_OPCODE_LIST(V) \
V(cgit, CGIT, 0xEC70) /* type = RIE_A COMPARE IMMEDIATE AND TRAP (64<-16) */ \
@@ -1587,8 +1588,7 @@ typedef uint64_t SixByteInstr;
V(mer, MER, 0x3C) /* type = RR MULTIPLY (short to long HFP) */ \
V(der, DER, 0x3D) /* type = RR DIVIDE (short HFP) */ \
V(aur, AUR, 0x3E) /* type = RR ADD UNNORMALIZED (short HFP) */ \
- V(sur, SUR, 0x3F) /* type = RR SUBTRACT UNNORMALIZED (short HFP) */ \
- V(sth, STH, 0x40) /* type = RR STORE HALFWORD (16) */
+ V(sur, SUR, 0x3F) /* type = RR SUBTRACT UNNORMALIZED (short HFP) */
#define S390_RIE_F_OPCODE_LIST(V) \
V(risblg, RISBLG, \
diff --git a/deps/v8/src/s390/deoptimizer-s390.cc b/deps/v8/src/s390/deoptimizer-s390.cc
index 6ee8c74213..46d939128a 100644
--- a/deps/v8/src/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/s390/deoptimizer-s390.cc
@@ -95,7 +95,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
+ Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
index f33812096c..e35e59031a 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -712,6 +712,9 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case XGRK:
Format(instr, "xgrk\t'r5,'r6,'r3");
break;
+ case CGFR:
+ Format(instr, "cgfr\t'r5,'r6");
+ break;
case CGR:
Format(instr, "cgr\t'r5,'r6");
break;
@@ -721,6 +724,15 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case LLGFR:
Format(instr, "llgfr\t'r5,'r6");
break;
+ case POPCNT_Z:
+ Format(instr, "popcnt\t'r5,'r6");
+ break;
+ case LLGCR:
+ Format(instr, "llgcr\t'r5,'r6");
+ break;
+ case LLCR:
+ Format(instr, "llcr\t'r5,'r6");
+ break;
case LBR:
Format(instr, "lbr\t'r5,'r6");
break;
@@ -781,6 +793,12 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case DSGR:
Format(instr, "dsgr\t'r5,'r6");
break;
+ case DSGFR:
+ Format(instr, "dsgfr\t'r5,'r6");
+ break;
+ case MSGFR:
+ Format(instr, "msgfr\t'r5,'r6");
+ break;
case LZDR:
Format(instr, "lzdr\t'f5");
break;
@@ -1397,6 +1415,15 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
case MSG:
Format(instr, "msg\t'r1,'d2('r2d,'r3)");
break;
+ case DSG:
+ Format(instr, "dsg\t'r1,'d2('r2d,'r3)");
+ break;
+ case DSGF:
+ Format(instr, "dsgf\t'r1,'d2('r2d,'r3)");
+ break;
+ case MSGF:
+ Format(instr, "msgf\t'r1,'d2('r2d,'r3)");
+ break;
case MSY:
Format(instr, "msy\t'r1,'d2('r2d,'r3)");
break;
@@ -1407,7 +1434,13 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
Format(instr, "stdy\t'f1,'d2('r2d,'r3)");
break;
case ADB:
- Format(instr, "adb\t'r1,'d1('r2d, 'r3)");
+ Format(instr, "adb\t'f1,'d1('r2d, 'r3)");
+ break;
+ case CDB:
+ Format(instr, "cdb\t'f1,'d1('r2d, 'r3)");
+ break;
+ case CEB:
+ Format(instr, "ceb\t'f1,'d1('r2d, 'r3)");
break;
case SDB:
Format(instr, "sdb\t'r1,'d1('r2d, 'r3)");
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 3ffcd5fc1c..8060cfe942 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -65,24 +65,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
@@ -128,13 +110,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r5};
+ Register registers[] = {r3, r2, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r5, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -161,6 +143,14 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r4 : start index (to support rest parameters)
+ // r3 : the target to call
+ Register registers[] = {r3, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
@@ -191,13 +181,12 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Descriptor::InitializePlatformSpecific( \
- CallInterfaceDescriptorData* data) { \
- data->InitializePlatformSpecific(0, nullptr, nullptr); \
- }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+ Register registers[] = {r3, r5, r2, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -381,6 +370,14 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r3, // loaded new FP
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 7c890609c1..9084931fae 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -933,7 +933,7 @@ void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
int prologue_offset) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
- LoadSmiLiteral(r1, Smi::FromInt(type));
+ Load(r1, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(r1);
}
}
@@ -971,8 +971,8 @@ void MacroAssembler::Prologue(bool code_pre_aging, Register base,
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
- LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+ LoadP(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+ LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
@@ -984,7 +984,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
// type
// CodeObject <-- new sp
- LoadSmiLiteral(ip, Smi::FromInt(type));
+ Load(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
if (type == StackFrame::INTERNAL) {
@@ -1057,7 +1057,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// all of the pushes that have happened inside of V8
// since we were called from C code
CleanseP(r14);
- LoadSmiLiteral(r1, Smi::FromInt(frame_type));
+ Load(r1, Operand(StackFrame::TypeToMarker(frame_type)));
PushCommonFrame(r1);
// Reserve room for saved entry sp and code object.
lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1101,17 +1101,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-void MacroAssembler::InitializeNewString(Register string, Register length,
- Heap::RootListIndex map_index,
- Register scratch1, Register scratch2) {
- SmiTag(scratch1, length);
- LoadRoot(scratch2, map_index);
- StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset));
- StoreP(FieldMemOperand(string, String::kHashFieldSlot),
- Operand(String::kEmptyHashField), scratch1);
- StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
-}
-
int MacroAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
// Running on the real platform. Use the alignment as mandated by the local
@@ -1463,13 +1452,15 @@ void MacroAssembler::IsObjectNameType(Register object, Register scratch,
bgt(fail);
}
-void MacroAssembler::DebugBreak() {
- LoadImmP(r2, Operand::Zero());
- mov(r3,
- Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
- CEntryStub ces(isolate(), 1);
- DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+ // Check whether we need to drop frames to restart a function on the stack.
+ ExternalReference restart_fp =
+ ExternalReference::debug_restart_fp_address(isolate());
+ mov(r3, Operand(restart_fp));
+ LoadP(r3, MemOperand(r3));
+ CmpP(r3, Operand::Zero());
+ Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+ ne);
}
void MacroAssembler::PushStackHandler() {
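MaybeDropFrames, which replaces DebugBreak, reads the debugger's restart-FP slot and jumps to the FrameDropperTrampoline builtin only when a restart is pending. The check in hypothetical portable C++ (helper names are stand-ins, not V8 API):

    #include <cstdint>

    void JumpToFrameDropperTrampoline(uintptr_t new_fp);  // hypothetical

    // Mirrors the emitted mov/LoadP/CmpP/Jump sequence.
    void MaybeDropFrames(const uintptr_t* debug_restart_fp_address) {
      uintptr_t restart_fp = *debug_restart_fp_address;
      if (restart_fp != 0) JumpToFrameDropperTrampoline(restart_fp);
    }
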
@@ -1628,7 +1619,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
// Prefetch the allocation_top's next cache line in advance to
// help alleviate potential cache misses.
// Mode 2 - Prefetch the data into a cache line for store access.
- pfd(r2, MemOperand(result, 256));
+ pfd(static_cast<Condition>(2), MemOperand(result, 256));
}
// Tag object.
@@ -1727,7 +1718,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// Prefetch the allocation_top's next cache line in advance to
// help alleviate potential cache misses.
// Mode 2 - Prefetch the data into a cache line for store access.
- pfd(r2, MemOperand(result, 256));
+ pfd(static_cast<Condition>(2), MemOperand(result, 256));
}
// Tag object.
@@ -1787,7 +1778,7 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
// Prefetch the allocation_top's next cache line in advance to
// help alleviate potential cache misses.
// Mode 2 - Prefetch the data into a cache line for store access.
- pfd(r2, MemOperand(result, 256));
+ pfd(static_cast<Condition>(2), MemOperand(result, 256));
}
// Tag object.
@@ -1855,7 +1846,7 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
// Prefetch the allocation_top's next cache line in advance to
// help alleviate potential cache misses.
// Mode 2 - Prefetch the data into a cache line for store access.
- pfd(r2, MemOperand(result, 256));
+ pfd(static_cast<Condition>(2), MemOperand(result, 256));
}
// Tag object.
@@ -1966,30 +1957,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss) {
- // Get the prototype or initial map from the function.
- LoadP(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- CompareRoot(result, Heap::kTheHoleValueRootIndex);
- beq(miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CompareObjectType(result, scratch, scratch, MAP_TYPE);
- bne(&done, Label::kNear);
-
- // Get the prototype from the initial map.
- LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- bind(&done);
-}
-
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
@@ -3270,6 +3237,88 @@ void MacroAssembler::Mul32(Register dst, const Operand& src1) {
msfi(dst, src1);
}
+#define Generate_MulHigh32(instr) \
+ { \
+ lgfr(dst, src1); \
+ instr(dst, src2); \
+ srlg(dst, dst, Operand(32)); \
+ }
+
+void MacroAssembler::MulHigh32(Register dst, Register src1,
+ const MemOperand& src2) {
+ Generate_MulHigh32(msgf);
+}
+
+void MacroAssembler::MulHigh32(Register dst, Register src1, Register src2) {
+ if (dst.is(src2)) {
+ std::swap(src1, src2);
+ }
+ Generate_MulHigh32(msgfr);
+}
+
+void MacroAssembler::MulHigh32(Register dst, Register src1,
+ const Operand& src2) {
+ Generate_MulHigh32(msgfi);
+}
+
+#undef Generate_MulHigh32
+
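Generate_MulHigh32 widens src1 with lgfr, multiplies to a 64-bit product, and shifts right by 32. Its arithmetic in plain C++ (a sketch, not V8 code):

    #include <cstdint>

    // High 32 bits of a signed 32x32 multiply, as lgfr/msgfr/srlg compute it.
    int32_t MulHigh32(int32_t a, int32_t b) {
      int64_t product = static_cast<int64_t>(a) * static_cast<int64_t>(b);
      return static_cast<int32_t>(static_cast<uint64_t>(product) >> 32);
    }
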
+#define Generate_MulHighU32(instr) \
+ { \
+ lr(r1, src1); \
+ instr(r0, src2); \
+ LoadlW(dst, r0); \
+ }
+
+void MacroAssembler::MulHighU32(Register dst, Register src1,
+ const MemOperand& src2) {
+ Generate_MulHighU32(ml);
+}
+
+void MacroAssembler::MulHighU32(Register dst, Register src1, Register src2) {
+ Generate_MulHighU32(mlr);
+}
+
+void MacroAssembler::MulHighU32(Register dst, Register src1,
+ const Operand& src2) {
+ USE(dst);
+ USE(src1);
+ USE(src2);
+ UNREACHABLE();
+}
+
+#undef Generate_MulHighU32
+
+#define Generate_Mul32WithOverflowIfCCUnequal(instr) \
+ { \
+ lgfr(dst, src1); \
+ instr(dst, src2); \
+ cgfr(dst, dst); \
+ }
+
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+ const MemOperand& src2) {
+ Register result = dst;
+ if (src2.rx().is(dst) || src2.rb().is(dst)) dst = r0;
+ Generate_Mul32WithOverflowIfCCUnequal(msgf);
+ if (!result.is(dst)) llgfr(result, dst);
+}
+
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+ Register src2) {
+ if (dst.is(src2)) {
+ std::swap(src1, src2);
+ }
+ Generate_Mul32WithOverflowIfCCUnequal(msgfr);
+}
+
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+ const Operand& src2) {
+ Generate_Mul32WithOverflowIfCCUnequal(msgfi);
+}
+
+#undef Generate_Mul32WithOverflowIfCCUnequal
+
void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
if (is_int20(src1.offset())) {
msg(dst, src1);
@@ -3309,6 +3358,108 @@ void MacroAssembler::DivP(Register dividend, Register divider) {
#endif
}
+#define Generate_Div32(instr) \
+ { \
+ lgfr(r1, src1); \
+ instr(r0, src2); \
+ LoadlW(dst, r1); \
+ }
+
+void MacroAssembler::Div32(Register dst, Register src1,
+ const MemOperand& src2) {
+ Generate_Div32(dsgf);
+}
+
+void MacroAssembler::Div32(Register dst, Register src1, Register src2) {
+ Generate_Div32(dsgfr);
+}
+
+void MacroAssembler::Div32(Register dst, Register src1, const Operand& src2) {
+ USE(dst);
+ USE(src1);
+ USE(src2);
+ UNREACHABLE();
+}
+
+#undef Generate_Div32
+
+#define Generate_DivU32(instr) \
+ { \
+ lr(r0, src1); \
+ srdl(r0, Operand(32)); \
+ instr(r0, src2); \
+ LoadlW(dst, r1); \
+ }
+
+void MacroAssembler::DivU32(Register dst, Register src1,
+ const MemOperand& src2) {
+ Generate_DivU32(dl);
+}
+
+void MacroAssembler::DivU32(Register dst, Register src1, Register src2) {
+ Generate_DivU32(dlr);
+}
+
+void MacroAssembler::DivU32(Register dst, Register src1, const Operand& src2) {
+ USE(dst);
+ USE(src1);
+ USE(src2);
+ UNREACHABLE();
+}
+
+#undef Generate_DivU32
+
+#define Generate_Mod32(instr) \
+ { \
+ lgfr(r1, src1); \
+ instr(r0, src2); \
+ LoadlW(dst, r0); \
+ }
+
+void MacroAssembler::Mod32(Register dst, Register src1,
+ const MemOperand& src2) {
+ Generate_Mod32(dsgf);
+}
+
+void MacroAssembler::Mod32(Register dst, Register src1, Register src2) {
+ Generate_Mod32(dsgfr);
+}
+
+void MacroAssembler::Mod32(Register dst, Register src1, const Operand& src2) {
+ USE(dst);
+ USE(src1);
+ USE(src2);
+ UNREACHABLE();
+}
+
+#undef Generate_Mod32
+
+#define Generate_ModU32(instr) \
+ { \
+ lr(r0, src1); \
+ srdl(r0, Operand(32)); \
+ instr(r0, src2); \
+ LoadlW(dst, r0); \
+ }
+
+void MacroAssembler::ModU32(Register dst, Register src1,
+ const MemOperand& src2) {
+ Generate_ModU32(dl);
+}
+
+void MacroAssembler::ModU32(Register dst, Register src1, Register src2) {
+ Generate_ModU32(dlr);
+}
+
+void MacroAssembler::ModU32(Register dst, Register src1, const Operand& src2) {
+ USE(dst);
+ USE(src1);
+ USE(src2);
+ UNREACHABLE();
+}
+
+#undef Generate_ModU32
+
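The Div32/DivU32/Mod32/ModU32 families all rely on the same hardware convention: the divide instructions leave the remainder in the even register of the pair and the quotient in the odd one, and LoadlW then picks whichever half the macro wants. A hedged C++ rendering:

    #include <cstdint>

    struct DivMod32 { int32_t remainder; int32_t quotient; };  // even, odd

    // What the dsgfr-based sequences compute before LoadlW selects a half.
    DivMod32 SignedDivMod32(int32_t dividend, int32_t divisor) {
      return { dividend % divisor, dividend / divisor };
    }
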
void MacroAssembler::MulP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
msgfi(dst, opnd);
@@ -3376,6 +3527,12 @@ void MacroAssembler::Add32(Register dst, const Operand& opnd) {
afi(dst, opnd);
}
+// Add 32-bit (Register dst = Register dst + Immediate opnd)
+void MacroAssembler::Add32_RI(Register dst, const Operand& opnd) {
+ // Just a wrapper for above
+ Add32(dst, opnd);
+}
+
// Add Pointer Size (Register dst = Register dst + Immediate opnd)
void MacroAssembler::AddP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
@@ -3400,6 +3557,13 @@ void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
Add32(dst, opnd);
}
+// Add 32-bit (Register dst = Register src + Immediate opnd)
+void MacroAssembler::Add32_RRI(Register dst, Register src,
+ const Operand& opnd) {
+ // Just a wrapper for above
+ Add32(dst, src, opnd);
+}
+
// Add Pointer Size (Register dst = Register src + Immediate opnd)
void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) {
@@ -4148,12 +4312,24 @@ void MacroAssembler::Load(Register dst, const Operand& opnd) {
#else
lhi(dst, opnd);
#endif
- } else {
+ } else if (is_int32(value)) {
#if V8_TARGET_ARCH_S390X
lgfi(dst, opnd);
#else
iilf(dst, opnd);
#endif
+ } else if (is_uint32(value)) {
+#if V8_TARGET_ARCH_S390X
+ llilf(dst, opnd);
+#else
+ iilf(dst, opnd);
+#endif
+ } else {
+ int32_t hi_32 = static_cast<int64_t>(value) >> 32;
+ int32_t lo_32 = static_cast<int32_t>(value);
+
+ iihf(dst, Operand(hi_32));
+ iilf(dst, Operand(lo_32));
}
}
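The new final branch of MacroAssembler::Load splits a 64-bit immediate into halves for iihf/iilf. The split itself, sketched in C++:

    #include <cstdint>

    // Mirrors the hi_32/lo_32 computation above.
    void SplitImm64(int64_t value, int32_t* hi_32, int32_t* lo_32) {
      *hi_32 = static_cast<int32_t>(value >> 32);  // high half, for iihf
      *lo_32 = static_cast<int32_t>(value);        // low half, for iilf
    }
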
@@ -4697,6 +4873,14 @@ void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
#endif
}
+void MacroAssembler::LoadlB(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ llgcr(dst, src);
+#else
+ llcr(dst, src);
+#endif
+}
+
void MacroAssembler::LoadLogicalReversedWordP(Register dst,
const MemOperand& mem) {
lrv(dst, mem);
@@ -5052,7 +5236,7 @@ void MacroAssembler::Popcnt32(Register dst, Register src) {
ar(dst, r0);
ShiftRight(r0, dst, Operand(8));
ar(dst, r0);
- LoadB(dst, dst);
+ LoadlB(dst, dst);
}
#ifdef V8_TARGET_ARCH_S390X
@@ -5067,7 +5251,7 @@ void MacroAssembler::Popcnt64(Register dst, Register src) {
AddP(dst, r0);
ShiftRightP(r0, dst, Operand(8));
AddP(dst, r0);
- LoadB(dst, dst);
+ LoadlB(dst, dst);
}
#endif
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index c4d4f9327a..a3b57e9d4f 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -245,8 +245,10 @@ class MacroAssembler : public Assembler {
// Add (Register - Immediate)
void Add32(Register dst, const Operand& imm);
+ void Add32_RI(Register dst, const Operand& imm);
void AddP(Register dst, const Operand& imm);
void Add32(Register dst, Register src, const Operand& imm);
+ void Add32_RRI(Register dst, Register src, const Operand& imm);
void AddP(Register dst, Register src, const Operand& imm);
// Add (Register - Register)
@@ -282,8 +284,12 @@ class MacroAssembler : public Assembler {
// Subtract (Register - Immediate)
void Sub32(Register dst, const Operand& imm);
+ void Sub32_RI(Register dst, const Operand& imm) { Sub32(dst, imm); }
void SubP(Register dst, const Operand& imm);
void Sub32(Register dst, Register src, const Operand& imm);
+ void Sub32_RRI(Register dst, Register src, const Operand& imm) {
+ Sub32(dst, src, imm);
+ }
void SubP(Register dst, Register src, const Operand& imm);
// Subtract (Register - Register)
@@ -316,6 +322,17 @@ class MacroAssembler : public Assembler {
void Mul32(Register dst, const MemOperand& src1);
void Mul32(Register dst, Register src1);
void Mul32(Register dst, const Operand& src1);
+ void MulHigh32(Register dst, Register src1, const MemOperand& src2);
+ void MulHigh32(Register dst, Register src1, Register src2);
+ void MulHigh32(Register dst, Register src1, const Operand& src2);
+ void MulHighU32(Register dst, Register src1, const MemOperand& src2);
+ void MulHighU32(Register dst, Register src1, Register src2);
+ void MulHighU32(Register dst, Register src1, const Operand& src2);
+ void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+ const MemOperand& src2);
+ void Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Register src2);
+ void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+ const Operand& src2);
void Mul64(Register dst, const MemOperand& src1);
void Mul64(Register dst, Register src1);
void Mul64(Register dst, const Operand& src1);
@@ -323,6 +340,20 @@ class MacroAssembler : public Assembler {
// Divide
void DivP(Register dividend, Register divider);
+ void Div32(Register dst, Register src1, const MemOperand& src2);
+ void Div32(Register dst, Register src1, Register src2);
+ void Div32(Register dst, Register src1, const Operand& src2);
+ void DivU32(Register dst, Register src1, const MemOperand& src2);
+ void DivU32(Register dst, Register src1, Register src2);
+ void DivU32(Register dst, Register src1, const Operand& src2);
+
+ // Mod
+ void Mod32(Register dst, Register src1, const MemOperand& src2);
+ void Mod32(Register dst, Register src1, Register src2);
+ void Mod32(Register dst, Register src1, const Operand& src2);
+ void ModU32(Register dst, Register src1, const MemOperand& src2);
+ void ModU32(Register dst, Register src1, Register src2);
+ void ModU32(Register dst, Register src1, const Operand& src2);
// Square root
void Sqrt(DoubleRegister result, DoubleRegister input);
@@ -359,6 +390,7 @@ class MacroAssembler : public Assembler {
void LoadB(Register dst, const MemOperand& opnd);
void LoadB(Register dst, Register src);
void LoadlB(Register dst, const MemOperand& opnd);
+ void LoadlB(Register dst, Register src);
void LoadLogicalReversedWordP(Register dst, const MemOperand& opnd);
void LoadLogicalReversedHalfWordP(Register dst, const MemOperand& opnd);
@@ -915,12 +947,9 @@ class MacroAssembler : public Assembler {
void IsObjectNameType(Register object, Register scratch, Label* fail);
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
+ // Frame restart support
+ void MaybeDropFrames();
- // ---------------------------------------------------------------------------
// Exception handling
// Push a new stack handler and link into stack handler chain.
@@ -1029,14 +1058,6 @@ class MacroAssembler : public Assembler {
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss);
-
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -1755,10 +1776,6 @@ class MacroAssembler : public Assembler {
bool* definitely_mismatches, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void InitializeNewString(Register string, Register length,
- Heap::RootListIndex map_index, Register scratch1,
- Register scratch2);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object, Register scratch,
Condition cond, // eq for new space, ne otherwise.
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 311e59d5e6..c5d3a1c3bb 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -1850,6 +1850,11 @@ double Simulator::ReadDouble(intptr_t addr) {
return *ptr;
}
+float Simulator::ReadFloat(intptr_t addr) {
+ float* ptr = reinterpret_cast<float*>(addr);
+ return *ptr;
+}
+
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
// The simulator uses a separate JS stack. If we have exhausted the C stack,
@@ -6608,15 +6613,16 @@ EVALUATE(LCR) {
DCHECK_OPCODE(LCR);
DECODE_RR_INSTRUCTION(r1, r2);
int32_t r2_val = get_low_register<int32_t>(r2);
- r2_val = ~r2_val;
- r2_val = r2_val + 1;
- set_low_register(r1, r2_val);
+ int32_t result = 0;
+ bool isOF = false;
+ isOF = __builtin_ssub_overflow(0, r2_val, &result);
+ set_low_register(r1, result);
SetS390ConditionCode<int32_t>(r2_val, 0);
// Checks for overflow where r2_val = -2147483648.
// Cannot do int comparison due to GCC 4.8 bug on x86.
// Detect INT_MIN alternatively, as it is the only value where both
// original and result are negative due to overflow.
- if (r2_val == (static_cast<int32_t>(1) << 31)) {
+ if (isOF) {
SetS390OverflowCode(true);
}
return length;
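LCR (and LCGR in the next hunk) now negates via a checked 0 - x, so the compiler builtin flags the single overflowing input instead of comparing bit patterns. A minimal sketch, assuming a GCC/Clang toolchain:

    #include <cstdint>

    // Returns true iff x == INT32_MIN, the only input whose negation
    // overflows; *result receives the wrapped negation either way.
    bool NegateChecked(int32_t x, int32_t* result) {
      return __builtin_ssub_overflow(0, x, result);
    }
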
@@ -9967,12 +9973,16 @@ EVALUATE(LCGR) {
DCHECK_OPCODE(LCGR);
DECODE_RRE_INSTRUCTION(r1, r2);
int64_t r2_val = get_register(r2);
- r2_val = ~r2_val;
- r2_val = r2_val + 1;
- set_register(r1, r2_val);
- SetS390ConditionCode<int64_t>(r2_val, 0);
- // if the input is INT_MIN, loading its complement would overflow
- if (r2_val == (static_cast<int64_t>(1) << 63)) {
+ int64_t result = 0;
+ bool isOF = false;
+#ifdef V8_TARGET_ARCH_S390X
+ isOF = __builtin_ssubl_overflow(0L, r2_val, &result);
+#else
+ isOF = __builtin_ssubll_overflow(0L, r2_val, &result);
+#endif
+ set_register(r1, result);
+ SetS390ConditionCode<int64_t>(result, 0);
+ if (isOF) {
SetS390OverflowCode(true);
}
return length;
@@ -10148,15 +10158,26 @@ EVALUATE(SLGFR) {
}
EVALUATE(MSGFR) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(MSGFR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+ int64_t product = r1_val * r2_val;
+ set_register(r1, product);
+ return length;
}
EVALUATE(DSGFR) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(DSGFR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ DCHECK(r1 % 2 == 0);
+ int64_t r1_val = get_register(r1 + 1);
+ int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+ int64_t quotient = r1_val / r2_val;
+ int64_t remainder = r1_val % r2_val;
+ set_register(r1, remainder);
+ set_register(r1 + 1, quotient);
+ return length;
}
EVALUATE(KMAC) {
@@ -10232,9 +10253,13 @@ EVALUATE(KMC) {
}
EVALUATE(CGFR) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(CGFR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ // Compare (64)
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+ SetS390ConditionCode<int64_t>(r1_val, r2_val);
+ return length;
}
EVALUATE(KIMD) {
@@ -10383,9 +10408,13 @@ EVALUATE(FLOGR) {
}
EVALUATE(LLGCR) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LLGCR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ uint64_t r2_val = get_low_register<uint64_t>(r2);
+ r2_val <<= 56;
+ r2_val >>= 56;
+ set_register(r1, r2_val);
+ return length;
}
EVALUATE(LLGHR) {
@@ -10463,9 +10492,13 @@ EVALUATE(TROO) {
}
EVALUATE(LLCR) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LLCR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ r2_val <<= 24;
+ r2_val >>= 24;
+ set_low_register(r1, r2_val);
+ return length;
}
EVALUATE(LLHR) {
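LLGCR and LLCR zero-extend the low byte of the source; the shift-up/shift-down pair in the new handlers is just a byte mask. Equivalent C++ (sketch):

    #include <cstdint>

    // llcr semantics: zero-extend the low byte into a 32-bit value.
    uint32_t ZeroExtendByte32(uint32_t src) {
      return src & 0xFFu;  // same as (src << 24) >> 24 on uint32_t
    }
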
@@ -11062,15 +11095,34 @@ EVALUATE(SLGF) {
}
EVALUATE(MSGF) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(MSGF);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ int64_t mem_val =
+ static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
+ int64_t r1_val = get_register(r1);
+ int64_t product = r1_val * mem_val;
+ set_register(r1, product);
+ return length;
}
EVALUATE(DSGF) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(DSGF);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ DCHECK(r1 % 2 == 0);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ int64_t mem_val =
+ static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
+ int64_t r1_val = get_register(r1 + 1);
+ int64_t quotient = r1_val / mem_val;
+ int64_t remainder = r1_val % mem_val;
+ set_register(r1, remainder);
+ set_register(r1 + 1, quotient);
+ return length;
}
EVALUATE(LRVG) {
@@ -11629,9 +11681,20 @@ EVALUATE(ML) {
}
EVALUATE(DL) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(DL);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ DCHECK(r1 % 2 == 0);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
+ uint64_t quotient =
+ static_cast<uint64_t>(r1_val) / static_cast<uint64_t>(mem_val);
+ uint64_t remainder =
+ static_cast<uint64_t>(r1_val) % static_cast<uint64_t>(mem_val);
+ set_low_register(r1, remainder);
+ set_low_register(r1 + 1, quotient);
+ return length;
}
EVALUATE(ALC) {
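The new DL handler divides only the odd register's 32-bit value (the full 64-bit even/odd dividend pair isn't modeled here), storing the remainder in the even register and the quotient in the odd one. In C++ terms (sketch):

    #include <cstdint>

    // Mirrors the DL handler: 32-bit unsigned divide, remainder to the even
    // register of the pair, quotient to the odd one.
    void DivideLogical32(uint32_t dividend, uint32_t divisor,
                         uint32_t* remainder, uint32_t* quotient) {
      *remainder = dividend % divisor;
      *quotient = dividend / divisor;
    }
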
@@ -12489,9 +12552,16 @@ EVALUATE(KEB) {
}
EVALUATE(CEB) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(CEB);
+
+ DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ float r1_val = get_float32_from_d_register(r1);
+ float fval = ReadFloat(b2_val + x2_val + d2_val);
+ SetS390ConditionCode<float>(r1_val, fval);
+ return length;
}
EVALUATE(AEB) {
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
index c66b05e92d..98532ffc54 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -304,6 +304,7 @@ class Simulator {
inline int64_t ReadDW(intptr_t addr);
inline double ReadDouble(intptr_t addr);
+ inline float ReadFloat(intptr_t addr);
inline void WriteDW(intptr_t addr, int64_t value);
// S390
diff --git a/deps/v8/src/signature.h b/deps/v8/src/signature.h
index 32050fe4b0..519138bec3 100644
--- a/deps/v8/src/signature.h
+++ b/deps/v8/src/signature.h
@@ -32,7 +32,7 @@ class Signature : public ZoneObject {
return reps_[index];
}
- bool Equals(Signature* that) {
+ bool Equals(const Signature* that) const {
if (this == that) return true;
if (this->parameter_count() != that->parameter_count()) return false;
if (this->return_count() != that->return_count()) return false;
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 1776cf1e4f..7f57f0aa64 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -7,8 +7,10 @@
#include <memory>
#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/snapshot/deserializer.h"
#include "src/snapshot/snapshot.h"
#include "src/version.h"
@@ -229,14 +231,20 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
return scope.CloseAndEscape(result);
}
+WasmCompiledModuleSerializer::WasmCompiledModuleSerializer(
+ Isolate* isolate, uint32_t source_hash, Handle<Context> native_context,
+ Handle<SeqOneByteString> module_bytes)
+ : CodeSerializer(isolate, source_hash) {
+ reference_map()->AddAttachedReference(*isolate->native_context());
+ reference_map()->AddAttachedReference(*module_bytes);
+}
+
std::unique_ptr<ScriptData> WasmCompiledModuleSerializer::SerializeWasmModule(
Isolate* isolate, Handle<FixedArray> input) {
Handle<WasmCompiledModule> compiled_module =
Handle<WasmCompiledModule>::cast(input);
- WasmCompiledModuleSerializer wasm_cs(isolate, 0);
- wasm_cs.reference_map()->AddAttachedReference(*isolate->native_context());
- wasm_cs.reference_map()->AddAttachedReference(
- compiled_module->module_bytes());
+ WasmCompiledModuleSerializer wasm_cs(isolate, 0, isolate->native_context(),
+ handle(compiled_module->module_bytes()));
ScriptData* data = wasm_cs.Serialize(compiled_module);
return std::unique_ptr<ScriptData>(data);
}
@@ -281,11 +289,35 @@ MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
Handle<WasmCompiledModule> compiled_module(
static_cast<WasmCompiledModule*>(*obj.ToHandleChecked()), isolate);
- WasmCompiledModule::RecreateModuleWrapper(isolate, compiled_module);
+ WasmCompiledModule::ReinitializeAfterDeserialization(isolate,
+ compiled_module);
DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
return compiled_module;
}
+void WasmCompiledModuleSerializer::SerializeCodeObject(
+ Code* code_object, HowToCode how_to_code, WhereToPoint where_to_point) {
+ Code::Kind kind = code_object->kind();
+ switch (kind) {
+ case Code::WASM_FUNCTION:
+ case Code::JS_TO_WASM_FUNCTION:
+ // Just serialize the code_object.
+ break;
+ case Code::WASM_TO_JS_FUNCTION:
+ // Serialize the illegal builtin instead. On instantiation of a
+ // deserialized module, these will be replaced again.
+ code_object = *isolate()->builtins()->Illegal();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ SerializeGeneric(code_object, how_to_code, where_to_point);
+}
+
+bool WasmCompiledModuleSerializer::ElideObject(Object* obj) {
+ return obj->IsWeakCell() || obj->IsForeign() || obj->IsBreakPointInfo();
+}
+
class Checksum {
public:
explicit Checksum(Vector<const byte> payload) {
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 15757379f0..4d87a731f7 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -64,23 +64,13 @@ class WasmCompiledModuleSerializer : public CodeSerializer {
protected:
void SerializeCodeObject(Code* code_object, HowToCode how_to_code,
- WhereToPoint where_to_point) override {
- Code::Kind kind = code_object->kind();
- if (kind == Code::WASM_FUNCTION || kind == Code::WASM_TO_JS_FUNCTION ||
- kind == Code::JS_TO_WASM_FUNCTION) {
- SerializeGeneric(code_object, how_to_code, where_to_point);
- } else {
- UNREACHABLE();
- }
- }
-
- bool ElideObject(Object* obj) override {
- return obj->IsWeakCell() || obj->IsForeign();
- };
+ WhereToPoint where_to_point) override;
+ bool ElideObject(Object* obj) override;
private:
- WasmCompiledModuleSerializer(Isolate* isolate, uint32_t source_hash)
- : CodeSerializer(isolate, source_hash) {}
+ WasmCompiledModuleSerializer(Isolate* isolate, uint32_t source_hash,
+ Handle<Context> native_context,
+ Handle<SeqOneByteString> module_bytes);
DISALLOW_COPY_AND_ASSIGN(WasmCompiledModuleSerializer);
};
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 87e430baf5..86d20e14c0 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -4,13 +4,17 @@
#include "src/snapshot/deserializer.h"
+#include "src/api.h"
+#include "src/assembler-inl.h"
#include "src/bootstrapper.h"
#include "src/external-reference-table.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/snapshot/natives.h"
#include "src/v8.h"
+#include "src/v8threads.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 7b1ced8159..0348956eb6 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -84,7 +84,7 @@ class Deserializer : public SerializerDeserializer {
DCHECK_EQ(kWordAligned, next_alignment_);
int alignment = data - (kAlignmentPrefix - 1);
DCHECK_LE(kWordAligned, alignment);
- DCHECK_LE(alignment, kSimd128Unaligned);
+ DCHECK_LE(alignment, kDoubleUnaligned);
next_alignment_ = static_cast<AllocationAlignment>(alignment);
}
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index b78a1edbd0..7f30c9cd04 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -93,10 +93,6 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Clear literal boilerplates.
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
- LiteralsArray* literals = function->literals();
- for (int i = 0; i < literals->literals_count(); i++) {
- literals->set_literal_undefined(i);
- }
function->ClearTypeFeedbackInfo();
}
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index ca4db75239..89aabdf263 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -7,6 +7,7 @@
#include "src/external-reference-table.h"
#include "src/ic/stub-cache.h"
#include "src/list-inl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -21,6 +22,8 @@ ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
for (uint32_t i = 0; i < table->size(); ++i) {
Address addr = table->address(i);
+ // Ignore duplicate API references.
+ if (table->is_api_reference(i) && !map_->Get(addr).IsNothing()) continue;
DCHECK(map_->Get(addr).IsNothing());
map_->Set(addr, i);
DCHECK(map_->Get(addr).IsJust());
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 2e971e3407..d99ca2ab30 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -4,6 +4,8 @@
#include "src/snapshot/serializer.h"
+#include "src/assembler-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler.h"
#include "src/snapshot/natives.h"
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 83ad2e7d39..1658b3bdcf 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -9,6 +9,7 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/objects-inl.h"
#include "src/snapshot/deserializer.h"
#include "src/snapshot/snapshot-source-sink.h"
#include "src/version.h"
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/source-position-table.h
index f569ac9819..756838d1e5 100644
--- a/deps/v8/src/source-position-table.h
+++ b/deps/v8/src/source-position-table.h
@@ -8,7 +8,6 @@
#include "src/assert-scope.h"
#include "src/checks.h"
#include "src/globals.h"
-#include "src/handles.h"
#include "src/source-position.h"
#include "src/zone/zone-containers.h"
@@ -18,6 +17,8 @@ namespace internal {
class AbstractCode;
class BytecodeArray;
class ByteArray;
+template <typename T>
+class Handle;
class Isolate;
class Zone;
diff --git a/deps/v8/src/source-position.cc b/deps/v8/src/source-position.cc
index ff204be73d..02bb339357 100644
--- a/deps/v8/src/source-position.cc
+++ b/deps/v8/src/source-position.cc
@@ -11,10 +11,16 @@ namespace internal {
std::ostream& operator<<(std::ostream& out, const SourcePositionInfo& pos) {
Handle<SharedFunctionInfo> function(pos.function);
- Handle<Script> script(Script::cast(function->script()));
+ String* name = nullptr;
+ if (function->script()->IsScript()) {
+ Script* script = Script::cast(function->script());
+ if (script->name()->IsString()) {
+ name = String::cast(script->name());
+ }
+ }
out << "<";
- if (script->name()->IsString()) {
- out << String::cast(script->name())->ToCString(DISALLOW_NULLS).get();
+ if (name != nullptr) {
+ out << name->ToCString(DISALLOW_NULLS).get();
} else {
out << "unknown";
}
@@ -78,12 +84,15 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
void SourcePosition::Print(std::ostream& out,
SharedFunctionInfo* function) const {
- Script* script = Script::cast(function->script());
- Object* source_name = script->name();
Script::PositionInfo pos;
- script->GetPositionInfo(ScriptOffset(), &pos, Script::WITH_OFFSET);
+ Object* source_name = nullptr;
+ if (function->script()->IsScript()) {
+ Script* script = Script::cast(function->script());
+ source_name = script->name();
+ script->GetPositionInfo(ScriptOffset(), &pos, Script::WITH_OFFSET);
+ }
out << "<";
- if (source_name->IsString()) {
+ if (source_name != nullptr && source_name->IsString()) {
out << String::cast(source_name)
->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
.get();
@@ -117,12 +126,14 @@ void SourcePosition::Print(std::ostream& out, Code* code) const {
SourcePositionInfo::SourcePositionInfo(SourcePosition pos,
Handle<SharedFunctionInfo> f)
: position(pos), function(f) {
- Handle<Script> script(Script::cast(function->script()));
- Script::PositionInfo info;
- if (Script::GetPositionInfo(script, pos.ScriptOffset(), &info,
- Script::WITH_OFFSET)) {
- line = info.line;
- column = info.column;
+ if (function->script()->IsScript()) {
+ Handle<Script> script(Script::cast(function->script()));
+ Script::PositionInfo info;
+ if (Script::GetPositionInfo(script, pos.ScriptOffset(), &info,
+ Script::WITH_OFFSET)) {
+ line = info.line;
+ column = info.column;
+ }
}
}
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder.h
index edc6476a45..c8c1329157 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder.h
@@ -310,6 +310,8 @@ class IncrementalStringBuilder {
INLINE(bool HasOverflowed()) const { return overflowed_; }
+ INLINE(int Length()) const { return accumulator_->length() + current_index_; }
+
// Change encoding to two-byte.
void ChangeEncoding() {
DCHECK_EQ(String::ONE_BYTE_ENCODING, encoding_);
diff --git a/deps/v8/src/third_party/vtune/BUILD.gn b/deps/v8/src/third_party/vtune/BUILD.gn
new file mode 100644
index 0000000000..33e8443b98
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/BUILD.gn
@@ -0,0 +1,20 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/toolchain/toolchain.gni")
+
+static_library("v8_vtune") {
+ sources = [
+ "ittnotify_config.h",
+ "ittnotify_types.h",
+ "jitprofiling.cc",
+ "jitprofiling.h",
+ "v8-vtune.h",
+ "vtune-jit.cc",
+ "vtune-jit.h",
+ ]
+ deps = [
+ "//:v8",
+ ]
+}
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 7c78b1f232..e6dd9bdca4 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -11,12 +11,12 @@ namespace trap_handler {
struct ProtectedInstructionData {
// The offset of this instruction from the start of its code object.
- int32_t instr_offset;
+ intptr_t instr_offset;
// The offset of the landing pad from the start of its code object.
//
  // TODO(eholk): Use a single landing pad and store the parameters here.
- int32_t landing_offset;
+ intptr_t landing_offset;
};
} // namespace trap_handler
diff --git a/deps/v8/src/type-hints.cc b/deps/v8/src/type-hints.cc
index 4267ab8906..29a15c6e3c 100644
--- a/deps/v8/src/type-hints.cc
+++ b/deps/v8/src/type-hints.cc
@@ -40,6 +40,8 @@ std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
return os << "InternalizedString";
case CompareOperationHint::kString:
return os << "String";
+ case CompareOperationHint::kReceiver:
+ return os << "Receiver";
case CompareOperationHint::kAny:
return os << "Any";
}
@@ -67,8 +69,6 @@ std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
return os << "Symbol";
case ToBooleanHint::kHeapNumber:
return os << "HeapNumber";
- case ToBooleanHint::kSimdValue:
- return os << "SimdValue";
case ToBooleanHint::kAny:
return os << "Any";
case ToBooleanHint::kNeedsMap:
@@ -98,8 +98,6 @@ std::string ToString(ToBooleanHint hint) {
return "Symbol";
case ToBooleanHint::kHeapNumber:
return "HeapNumber";
- case ToBooleanHint::kSimdValue:
- return "SimdValue";
case ToBooleanHint::kAny:
return "Any";
case ToBooleanHint::kNeedsMap:
diff --git a/deps/v8/src/type-hints.h b/deps/v8/src/type-hints.h
index 0364154593..c7c6cccae0 100644
--- a/deps/v8/src/type-hints.h
+++ b/deps/v8/src/type-hints.h
@@ -35,6 +35,7 @@ enum class CompareOperationHint : uint8_t {
kNumberOrOddball,
kInternalizedString,
kString,
+ kReceiver,
kAny
};
@@ -55,10 +56,9 @@ enum class ToBooleanHint : uint16_t {
kString = 1u << 5,
kSymbol = 1u << 6,
kHeapNumber = 1u << 7,
- kSimdValue = 1u << 8,
kAny = kUndefined | kBoolean | kNull | kSmallInteger | kReceiver | kString |
- kSymbol | kHeapNumber | kSimdValue,
- kNeedsMap = kReceiver | kString | kSymbol | kHeapNumber | kSimdValue,
+ kSymbol | kHeapNumber,
+ kNeedsMap = kReceiver | kString | kSymbol | kHeapNumber,
kCanBeUndetectable = kReceiver,
};
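Dropping kSimdValue frees bit 8 and shrinks both composite masks. A quick sketch with plain uint16_t constants standing in for the enum class; the static_asserts check the new mask values at compile time:

#include <cstdint>

constexpr uint16_t kUndefined = 1u << 0, kBoolean = 1u << 1, kNull = 1u << 2,
                   kSmallInteger = 1u << 3, kReceiver = 1u << 4,
                   kString = 1u << 5, kSymbol = 1u << 6, kHeapNumber = 1u << 7;
constexpr uint16_t kAny = kUndefined | kBoolean | kNull | kSmallInteger |
                          kReceiver | kString | kSymbol | kHeapNumber;
constexpr uint16_t kNeedsMap = kReceiver | kString | kSymbol | kHeapNumber;

static_assert(kAny == 0xff, "bit 8 (formerly kSimdValue) is now unused");
static_assert((kNeedsMap & ~kAny) == 0, "kNeedsMap stays a subset of kAny");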
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 8cd0897809..d7de1b8618 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -4,6 +4,7 @@
#include "src/type-info.h"
+#include "src/assembler-inl.h"
#include "src/ast/ast.h"
#include "src/code-stubs.h"
#include "src/ic/ic.h"
@@ -47,8 +48,7 @@ Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
return Handle<Object>::cast(isolate()->factory()->undefined_value());
}
-
-Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorSlot slot) {
+Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackSlot slot) {
DCHECK(slot.ToInt() >= 0 && slot.ToInt() < feedback_vector_->length());
Handle<Object> undefined =
Handle<Object>::cast(isolate()->factory()->undefined_value());
@@ -62,23 +62,20 @@ Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorSlot slot) {
obj = cell->value();
}
- if (obj->IsJSFunction() || obj->IsAllocationSite() || obj->IsSymbol() ||
- obj->IsSimd128Value()) {
+ if (obj->IsJSFunction() || obj->IsAllocationSite() || obj->IsSymbol()) {
return Handle<Object>(obj, isolate());
}
return undefined;
}
-
-InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(
- FeedbackVectorSlot slot) {
+InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(FeedbackSlot slot) {
if (!slot.IsInvalid()) {
- FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
- if (kind == FeedbackVectorSlotKind::LOAD_IC) {
+ FeedbackSlotKind kind = feedback_vector_->GetKind(slot);
+ if (IsLoadICKind(kind)) {
LoadICNexus nexus(feedback_vector_, slot);
return nexus.StateFromFeedback();
- } else if (kind == FeedbackVectorSlotKind::KEYED_LOAD_IC) {
+ } else if (IsKeyedLoadICKind(kind)) {
KeyedLoadICNexus nexus(feedback_vector_, slot);
return nexus.StateFromFeedback();
}
@@ -89,14 +86,13 @@ InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(
return PREMONOMORPHIC;
}
-
-bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorSlot slot) {
+bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackSlot slot) {
if (!slot.IsInvalid()) {
- FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
- if (kind == FeedbackVectorSlotKind::STORE_IC) {
+ FeedbackSlotKind kind = feedback_vector_->GetKind(slot);
+ if (IsStoreICKind(kind)) {
StoreICNexus nexus(feedback_vector_, slot);
return nexus.StateFromFeedback() == UNINITIALIZED;
- } else if (kind == FeedbackVectorSlotKind::KEYED_STORE_IC) {
+ } else if (IsKeyedStoreICKind(kind)) {
KeyedStoreICNexus nexus(feedback_vector_, slot);
return nexus.StateFromFeedback() == UNINITIALIZED;
}
@@ -104,41 +100,34 @@ bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorSlot slot) {
return true;
}
-
-bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorSlot slot) {
+bool TypeFeedbackOracle::CallIsUninitialized(FeedbackSlot slot) {
Handle<Object> value = GetInfo(slot);
return value->IsUndefined(isolate()) ||
value.is_identical_to(
FeedbackVector::UninitializedSentinel(isolate()));
}
-
-bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackVectorSlot slot) {
+bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackSlot slot) {
Handle<Object> value = GetInfo(slot);
return value->IsAllocationSite() || value->IsJSFunction();
}
-
-bool TypeFeedbackOracle::CallNewIsMonomorphic(FeedbackVectorSlot slot) {
+bool TypeFeedbackOracle::CallNewIsMonomorphic(FeedbackSlot slot) {
Handle<Object> info = GetInfo(slot);
return info->IsAllocationSite() || info->IsJSFunction();
}
-
-byte TypeFeedbackOracle::ForInType(FeedbackVectorSlot feedback_vector_slot) {
+byte TypeFeedbackOracle::ForInType(FeedbackSlot feedback_vector_slot) {
Handle<Object> value = GetInfo(feedback_vector_slot);
return value.is_identical_to(FeedbackVector::UninitializedSentinel(isolate()))
? ForInStatement::FAST_FOR_IN
: ForInStatement::SLOW_FOR_IN;
}
-
void TypeFeedbackOracle::GetStoreModeAndKeyType(
- FeedbackVectorSlot slot, KeyedAccessStoreMode* store_mode,
+ FeedbackSlot slot, KeyedAccessStoreMode* store_mode,
IcCheckType* key_type) {
- if (!slot.IsInvalid() &&
- feedback_vector_->GetKind(slot) ==
- FeedbackVectorSlotKind::KEYED_STORE_IC) {
+ if (!slot.IsInvalid() && feedback_vector_->IsKeyedStoreIC(slot)) {
KeyedStoreICNexus nexus(feedback_vector_, slot);
*store_mode = nexus.GetKeyedAccessStoreMode();
*key_type = nexus.GetKeyType();
@@ -148,8 +137,7 @@ void TypeFeedbackOracle::GetStoreModeAndKeyType(
}
}
-
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(FeedbackVectorSlot slot) {
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(FeedbackSlot slot) {
Handle<Object> info = GetInfo(slot);
if (info->IsAllocationSite()) {
return Handle<JSFunction>(isolate()->native_context()->array_function());
@@ -158,9 +146,7 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(FeedbackVectorSlot slot) {
return Handle<JSFunction>::cast(info);
}
-
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(
- FeedbackVectorSlot slot) {
+Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(FeedbackSlot slot) {
Handle<Object> info = GetInfo(slot);
if (info->IsJSFunction()) {
return Handle<JSFunction>::cast(info);
@@ -170,9 +156,8 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(
return Handle<JSFunction>(isolate()->native_context()->array_function());
}
-
Handle<AllocationSite> TypeFeedbackOracle::GetCallAllocationSite(
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
Handle<Object> info = GetInfo(slot);
if (info->IsAllocationSite()) {
return Handle<AllocationSite>::cast(info);
@@ -180,9 +165,8 @@ Handle<AllocationSite> TypeFeedbackOracle::GetCallAllocationSite(
return Handle<AllocationSite>::null();
}
-
Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(
- FeedbackVectorSlot slot) {
+ FeedbackSlot slot) {
Handle<Object> info = GetInfo(slot);
if (info->IsAllocationSite()) {
return Handle<AllocationSite>::cast(info);
@@ -206,6 +190,8 @@ AstType* CompareOpHintToType(CompareOperationHint hint) {
return AstType::InternalizedString();
case CompareOperationHint::kString:
return AstType::String();
+ case CompareOperationHint::kReceiver:
+ return AstType::Receiver();
case CompareOperationHint::kAny:
return AstType::Any();
}
@@ -235,7 +221,7 @@ AstType* BinaryOpFeedbackToType(int hint) {
} // end anonymous namespace
-void TypeFeedbackOracle::CompareType(TypeFeedbackId id, FeedbackVectorSlot slot,
+void TypeFeedbackOracle::CompareType(TypeFeedbackId id, FeedbackSlot slot,
AstType** left_type, AstType** right_type,
AstType** combined_type) {
Handle<Object> info = GetInfo(id);
@@ -296,7 +282,7 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id, FeedbackVectorSlot slot,
}
}
-void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot,
+void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, FeedbackSlot slot,
AstType** left, AstType** right,
AstType** result,
Maybe<int>* fixed_right_arg,
@@ -367,8 +353,7 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot,
}
}
-AstType* TypeFeedbackOracle::CountType(TypeFeedbackId id,
- FeedbackVectorSlot slot) {
+AstType* TypeFeedbackOracle::CountType(TypeFeedbackId id, FeedbackSlot slot) {
Handle<Object> object = GetInfo(id);
if (slot.IsInvalid()) {
DCHECK(!object->IsCode());
@@ -397,8 +382,7 @@ bool TypeFeedbackOracle::HasOnlyStringMaps(SmallMapList* receiver_types) {
return all_strings;
}
-
-void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorSlot slot,
+void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
@@ -409,9 +393,8 @@ void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorSlot slot,
}
}
-
void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
- FeedbackVectorSlot slot, SmallMapList* receiver_types, bool* is_string,
+ FeedbackSlot slot, SmallMapList* receiver_types, bool* is_string,
IcCheckType* key_type) {
receiver_types->Clear();
if (slot.IsInvalid()) {
@@ -425,8 +408,7 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
}
}
-
-void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorSlot slot,
+void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
@@ -434,24 +416,22 @@ void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorSlot slot,
receiver_types);
}
-
void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
- FeedbackVectorSlot slot, SmallMapList* receiver_types,
+ FeedbackSlot slot, SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode, IcCheckType* key_type) {
receiver_types->Clear();
CollectReceiverTypes(slot, receiver_types);
GetStoreModeAndKeyType(slot, store_mode, key_type);
}
-
-void TypeFeedbackOracle::CountReceiverTypes(FeedbackVectorSlot slot,
+void TypeFeedbackOracle::CountReceiverTypes(FeedbackSlot slot,
SmallMapList* receiver_types) {
receiver_types->Clear();
if (!slot.IsInvalid()) CollectReceiverTypes(slot, receiver_types);
}
void TypeFeedbackOracle::CollectReceiverTypes(StubCache* stub_cache,
- FeedbackVectorSlot slot,
+ FeedbackSlot slot,
Handle<Name> name,
SmallMapList* types) {
StoreICNexus nexus(feedback_vector_, slot);
@@ -471,15 +451,14 @@ void TypeFeedbackOracle::CollectReceiverTypes(StubCache* stub_cache,
}
}
-
-void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorSlot slot,
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackSlot slot,
SmallMapList* types) {
- FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
- if (kind == FeedbackVectorSlotKind::STORE_IC) {
+ FeedbackSlotKind kind = feedback_vector_->GetKind(slot);
+ if (IsStoreICKind(kind) || IsStoreOwnICKind(kind)) {
StoreICNexus nexus(feedback_vector_, slot);
CollectReceiverTypes(&nexus, types);
} else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, kind);
+ DCHECK(IsKeyedStoreICKind(kind));
KeyedStoreICNexus nexus(feedback_vector_, slot);
CollectReceiverTypes(&nexus, types);
}
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 33751dc828..c8e35564ab 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -26,37 +26,36 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<FeedbackVector> feedback_vector,
Handle<Context> native_context);
- InlineCacheState LoadInlineCacheState(FeedbackVectorSlot slot);
- bool StoreIsUninitialized(FeedbackVectorSlot slot);
- bool CallIsUninitialized(FeedbackVectorSlot slot);
- bool CallIsMonomorphic(FeedbackVectorSlot slot);
- bool CallNewIsMonomorphic(FeedbackVectorSlot slot);
+ InlineCacheState LoadInlineCacheState(FeedbackSlot slot);
+ bool StoreIsUninitialized(FeedbackSlot slot);
+ bool CallIsUninitialized(FeedbackSlot slot);
+ bool CallIsMonomorphic(FeedbackSlot slot);
+ bool CallNewIsMonomorphic(FeedbackSlot slot);
// TODO(1571) We can't use ForInStatement::ForInType as the return value due
// to various cycles in our headers.
// TODO(rossberg): once all oracle access is removed from ast.cc, it should
// be possible.
- byte ForInType(FeedbackVectorSlot feedback_vector_slot);
+ byte ForInType(FeedbackSlot feedback_vector_slot);
- void GetStoreModeAndKeyType(FeedbackVectorSlot slot,
+ void GetStoreModeAndKeyType(FeedbackSlot slot,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
- void PropertyReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
+ void PropertyReceiverTypes(FeedbackSlot slot, Handle<Name> name,
SmallMapList* receiver_types);
- void KeyedPropertyReceiverTypes(FeedbackVectorSlot slot,
+ void KeyedPropertyReceiverTypes(FeedbackSlot slot,
SmallMapList* receiver_types, bool* is_string,
IcCheckType* key_type);
- void AssignmentReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
+ void AssignmentReceiverTypes(FeedbackSlot slot, Handle<Name> name,
SmallMapList* receiver_types);
- void KeyedAssignmentReceiverTypes(FeedbackVectorSlot slot,
+ void KeyedAssignmentReceiverTypes(FeedbackSlot slot,
SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
- void CountReceiverTypes(FeedbackVectorSlot slot,
- SmallMapList* receiver_types);
+ void CountReceiverTypes(FeedbackSlot slot, SmallMapList* receiver_types);
- void CollectReceiverTypes(FeedbackVectorSlot slot, SmallMapList* types);
+ void CollectReceiverTypes(FeedbackSlot slot, SmallMapList* types);
void CollectReceiverTypes(FeedbackNexus* nexus, SmallMapList* types);
static bool IsRelevantFeedback(Map* map, Context* native_context) {
@@ -66,10 +65,10 @@ class TypeFeedbackOracle: public ZoneObject {
native_context;
}
- Handle<JSFunction> GetCallTarget(FeedbackVectorSlot slot);
- Handle<AllocationSite> GetCallAllocationSite(FeedbackVectorSlot slot);
- Handle<JSFunction> GetCallNewTarget(FeedbackVectorSlot slot);
- Handle<AllocationSite> GetCallNewAllocationSite(FeedbackVectorSlot slot);
+ Handle<JSFunction> GetCallTarget(FeedbackSlot slot);
+ Handle<AllocationSite> GetCallAllocationSite(FeedbackSlot slot);
+ Handle<JSFunction> GetCallNewTarget(FeedbackSlot slot);
+ Handle<AllocationSite> GetCallNewAllocationSite(FeedbackSlot slot);
// TODO(1571) We can't use ToBooleanICStub::Types as the return value because
// of various cycles in our headers. Death to tons of implementations in
@@ -77,22 +76,22 @@ class TypeFeedbackOracle: public ZoneObject {
uint16_t ToBooleanTypes(TypeFeedbackId id);
// Get type information for arithmetic operations and compares.
- void BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot, AstType** left,
+ void BinaryType(TypeFeedbackId id, FeedbackSlot slot, AstType** left,
AstType** right, AstType** result,
Maybe<int>* fixed_right_arg,
Handle<AllocationSite>* allocation_site,
Token::Value operation);
- void CompareType(TypeFeedbackId id, FeedbackVectorSlot slot, AstType** left,
+ void CompareType(TypeFeedbackId id, FeedbackSlot slot, AstType** left,
AstType** right, AstType** combined);
- AstType* CountType(TypeFeedbackId id, FeedbackVectorSlot slot);
+ AstType* CountType(TypeFeedbackId id, FeedbackSlot slot);
Zone* zone() const { return zone_; }
Isolate* isolate() const { return isolate_; }
private:
- void CollectReceiverTypes(StubCache* stub_cache, FeedbackVectorSlot slot,
+ void CollectReceiverTypes(StubCache* stub_cache, FeedbackSlot slot,
Handle<Name> name, SmallMapList* types);
void CollectReceiverTypes(StubCache* stub_cache, FeedbackNexus* nexus,
Handle<Name> name, SmallMapList* types);
@@ -117,7 +116,7 @@ class TypeFeedbackOracle: public ZoneObject {
// Returns an element from the type feedback vector. Returns undefined
// if there is no information.
- Handle<Object> GetInfo(FeedbackVectorSlot slot);
+ Handle<Object> GetInfo(FeedbackSlot slot);
private:
Handle<Context> native_context_;
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index ef640c3b0e..96a7d2c9ee 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -76,8 +76,7 @@ char* SimpleStringBuilder::Finalize() {
return buffer_.start();
}
-
-std::ostream& operator<<(std::ostream& os, FeedbackVectorSlot slot) {
+std::ostream& operator<<(std::ostream& os, FeedbackSlot slot) {
return os << "#" << slot.id_;
}
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 0ea1de1e07..f6e50e5191 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -137,15 +137,20 @@ inline int MostSignificantBit(uint32_t x) {
return nibble + msb4[x];
}
-
-// The C++ standard leaves the semantics of '>>' undefined for
-// negative signed operands. Most implementations do the right thing,
-// though.
-inline int ArithmeticShiftRight(int x, int s) {
- return x >> s;
+template <typename T>
+static T ArithmeticShiftRight(T x, int shift) {
+ DCHECK_LE(0, shift);
+ if (x < 0) {
+  // Right shift of signed values is implementation-defined. Simulate a
+ // true arithmetic right shift by adding leading sign bits.
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ UnsignedT mask = ~(static_cast<UnsignedT>(~0) >> shift);
+ return (static_cast<UnsignedT>(x) >> shift) | mask;
+ } else {
+ return x >> shift;
+ }
}
-
template <typename T>
int Compare(const T& a, const T& b) {
if (a == b)
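The templated replacement simulates a true arithmetic right shift portably: shift the two's-complement bit pattern as an unsigned value, then OR the lost sign bits back in. A worked example of the same function, assuming a 32-bit two's-complement int32_t:

#include <cassert>
#include <cstdint>
#include <type_traits>

template <typename T>
T ArithmeticShiftRight(T x, int shift) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  if (x < 0) {
    UnsignedT mask = ~(static_cast<UnsignedT>(~0) >> shift);
    return static_cast<T>((static_cast<UnsignedT>(x) >> shift) | mask);
  }
  return x >> shift;
}

int main() {
  // x = -8, shift = 1:
  //   UnsignedT(-8)  = 0xFFFFFFF8
  //   logical >> 1   = 0x7FFFFFFC  (sign bit lost)
  //   mask           = ~(0xFFFFFFFF >> 1) = 0x80000000
  //   OR them        = 0xFFFFFFFC = -4
  assert(ArithmeticShiftRight<int32_t>(-8, 1) == -4);
  assert(ArithmeticShiftRight<int32_t>(8, 1) == 4);
}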
@@ -187,6 +192,11 @@ inline bool IsAddressAligned(Address addr,
return IsAligned(offs, alignment);
}
+template <typename T, typename U>
+inline T RoundUpToMultipleOfPowOf2(T value, U multiple) {
+ DCHECK(multiple && ((multiple & (multiple - 1)) == 0));
+ return (value + multiple - 1) & ~(multiple - 1);
+}
// Returns the maximum of the two parameters.
template <typename T>
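RoundUpToMultipleOfPowOf2 leans on the DCHECK'd power-of-two invariant: adding multiple - 1 and masking with ~(multiple - 1) rounds up without any division. For instance:

#include <cassert>

template <typename T, typename U>
T RoundUpToMultipleOfPowOf2(T value, U multiple) {
  assert(multiple && ((multiple & (multiple - 1)) == 0));
  return (value + multiple - 1) & ~(multiple - 1);
}

int main() {
  assert(RoundUpToMultipleOfPowOf2(13, 8) == 16);  // 13 + 7 = 20; 20 & ~7 = 16
  assert(RoundUpToMultipleOfPowOf2(16, 8) == 16);  // already aligned
}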
@@ -503,13 +513,22 @@ V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
size_t size) {
memmove(dest, src, size);
}
-const int kMinComplexMemCopy = 16 * kPointerSize;
+const int kMinComplexMemCopy = 8;
#endif // V8_TARGET_ARCH_IA32
// ----------------------------------------------------------------------------
// Miscellaneous
+// Memory offset for lower and higher bits in a 64 bit integer.
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+static const int kInt64LowerHalfMemoryOffset = 0;
+static const int kInt64UpperHalfMemoryOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+static const int kInt64LowerHalfMemoryOffset = 4;
+static const int kInt64UpperHalfMemoryOffset = 0;
+#endif // V8_TARGET_LITTLE_ENDIAN
+
// A static resource holds a static instance that can be reserved in
// a local scope using an instance of Access. Attempts to re-reserve
// the instance will cause an error.
@@ -880,24 +899,21 @@ inline bool operator>(TypeFeedbackId lhs, TypeFeedbackId rhs) {
return lhs.ToInt() > rhs.ToInt();
}
-
-class FeedbackVectorSlot {
+class FeedbackSlot {
public:
- FeedbackVectorSlot() : id_(kInvalidSlot) {}
- explicit FeedbackVectorSlot(int id) : id_(id) {}
+ FeedbackSlot() : id_(kInvalidSlot) {}
+ explicit FeedbackSlot(int id) : id_(id) {}
int ToInt() const { return id_; }
- static FeedbackVectorSlot Invalid() { return FeedbackVectorSlot(); }
+ static FeedbackSlot Invalid() { return FeedbackSlot(); }
bool IsInvalid() const { return id_ == kInvalidSlot; }
- bool operator==(FeedbackVectorSlot that) const {
- return this->id_ == that.id_;
- }
- bool operator!=(FeedbackVectorSlot that) const { return !(*this == that); }
+ bool operator==(FeedbackSlot that) const { return this->id_ == that.id_; }
+ bool operator!=(FeedbackSlot that) const { return !(*this == that); }
- friend size_t hash_value(FeedbackVectorSlot slot) { return slot.ToInt(); }
- friend std::ostream& operator<<(std::ostream& os, FeedbackVectorSlot);
+ friend size_t hash_value(FeedbackSlot slot) { return slot.ToInt(); }
+ friend std::ostream& operator<<(std::ostream& os, FeedbackSlot);
private:
static const int kInvalidSlot = -1;
@@ -919,6 +935,17 @@ class BailoutId {
static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
+ // Special bailout id support for deopting into the {JSConstructStub} stub.
+ // The following hard-coded deoptimization points are supported by the stub:
+ // - {ConstructStubCreate} maps to {construct_stub_create_deopt_pc_offset}.
+ // - {ConstructStubInvoke} maps to {construct_stub_invoke_deopt_pc_offset}.
+ static BailoutId ConstructStubCreate() { return BailoutId(1); }
+ static BailoutId ConstructStubInvoke() { return BailoutId(2); }
+ bool IsValidForConstructStub() const {
+ return id_ == ConstructStubCreate().ToInt() ||
+ id_ == ConstructStubInvoke().ToInt();
+ }
+
bool IsNone() const { return id_ == kNoneId; }
bool operator==(const BailoutId& other) const { return id_ == other.id_; }
bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 7fd8dc0930..45e60ada28 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/api.h"
#include "src/assembler.h"
#include "src/base/once.h"
#include "src/base/platform/platform.h"
@@ -15,7 +16,7 @@
#include "src/frames.h"
#include "src/isolate.h"
#include "src/libsampler/sampler.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index f3fb248c29..144f482853 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -439,8 +439,6 @@
'ast/ast-expression-rewriter.h',
'ast/ast-function-literal-id-reindexer.cc',
'ast/ast-function-literal-id-reindexer.h',
- 'ast/ast-literal-reindexer.cc',
- 'ast/ast-literal-reindexer.h',
'ast/ast-numbering.cc',
'ast/ast-numbering.h',
'ast/ast-traversal-visitor.h',
@@ -478,8 +476,14 @@
'bootstrapper.cc',
'bootstrapper.h',
'builtins/builtins-api.cc',
+ 'builtins/builtins-arguments.cc',
+ 'builtins/builtins-arguments.h',
'builtins/builtins-arraybuffer.cc',
'builtins/builtins-array.cc',
+ 'builtins/builtins-async-iterator.cc',
+ 'builtins/builtins-async-function.cc',
+ 'builtins/builtins-async.cc',
+ 'builtins/builtins-async.h',
'builtins/builtins-boolean.cc',
'builtins/builtins-call.cc',
'builtins/builtins-callsite.cc',
@@ -501,16 +505,19 @@
'builtins/builtins-math.cc',
'builtins/builtins-number.cc',
'builtins/builtins-object.cc',
+ 'builtins/builtins-object.h',
'builtins/builtins-promise.cc',
'builtins/builtins-promise.h',
'builtins/builtins-proxy.cc',
'builtins/builtins-reflect.cc',
'builtins/builtins-regexp.cc',
+ 'builtins/builtins-regexp.h',
'builtins/builtins-sharedarraybuffer.cc',
'builtins/builtins-string.cc',
'builtins/builtins-symbol.cc',
'builtins/builtins-typedarray.cc',
'builtins/builtins-utils.h',
+ 'builtins/builtins-wasm.cc',
'builtins/builtins.cc',
'builtins/builtins.h',
'cached-powers.cc',
@@ -631,8 +638,6 @@
'compiler/js-frame-specialization.h',
'compiler/js-generic-lowering.cc',
'compiler/js-generic-lowering.h',
- 'compiler/js-global-object-specialization.cc',
- 'compiler/js-global-object-specialization.h',
'compiler/js-graph.cc',
'compiler/js-graph.h',
'compiler/js-inlining.cc',
@@ -645,6 +650,8 @@
'compiler/js-native-context-specialization.h',
'compiler/js-operator.cc',
'compiler/js-operator.h',
+ 'compiler/js-type-hint-lowering.cc',
+ 'compiler/js-type-hint-lowering.h',
'compiler/js-typed-lowering.cc',
'compiler/js-typed-lowering.h',
'compiler/jump-threading.cc',
@@ -835,6 +842,8 @@
'dateparser-inl.h',
'dateparser.cc',
'dateparser.h',
+ 'debug/debug-coverage.cc',
+ 'debug/debug-coverage.h',
'debug/debug-evaluate.cc',
'debug/debug-evaluate.h',
'debug/debug-interface.h',
@@ -891,10 +900,13 @@
'feedback-vector-inl.h',
'feedback-vector.cc',
'feedback-vector.h',
+ 'ffi/ffi-compiler.cc',
+ 'ffi/ffi-compiler.h',
'field-index.h',
'field-index-inl.h',
'field-type.cc',
'field-type.h',
+ 'find-and-replace-pattern.h',
'fixed-dtoa.cc',
'fixed-dtoa.h',
'flag-definitions.h',
@@ -967,7 +979,6 @@
'ic/access-compiler.cc',
'ic/access-compiler.h',
'ic/accessor-assembler.cc',
- 'ic/accessor-assembler-impl.h',
'ic/accessor-assembler.h',
'ic/call-optimization.cc',
'ic/call-optimization.h',
@@ -982,8 +993,6 @@
'ic/ic-stats.h',
'ic/ic.cc',
'ic/ic.h',
- 'ic/ic-compiler.cc',
- 'ic/ic-compiler.h',
'ic/keyed-store-generic.cc',
'ic/keyed-store-generic.h',
'identity-map.cc',
@@ -1046,6 +1055,7 @@
'json-stringifier.h',
'keys.h',
'keys.cc',
+ 'label.h',
'layout-descriptor-inl.h',
'layout-descriptor.cc',
'layout-descriptor.h',
@@ -1068,6 +1078,7 @@
'macro-assembler.h',
'machine-type.cc',
'machine-type.h',
+ 'managed.h',
'messages.cc',
'messages.h',
'msan.h',
@@ -1078,9 +1089,12 @@
'objects-printer.cc',
'objects.cc',
'objects.h',
+ 'objects/literal-objects.cc',
+ 'objects/literal-objects.h',
'objects/module-info.h',
'objects/object-macros.h',
'objects/object-macros-undef.h',
+ 'objects/regexp-match-info.h',
'objects/scope-info.cc',
'objects/scope-info.h',
'ostreams.cc',
@@ -1103,6 +1117,8 @@
'parsing/preparse-data-format.h',
'parsing/preparse-data.cc',
'parsing/preparse-data.h',
+ 'parsing/preparsed-scope-data.cc',
+ 'parsing/preparsed-scope-data.h',
'parsing/preparser.cc',
'parsing/preparser.h',
'parsing/rewriter.cc',
@@ -1201,7 +1217,6 @@
'runtime/runtime-proxy.cc',
'runtime/runtime-regexp.cc',
'runtime/runtime-scopes.cc',
- 'runtime/runtime-simd.cc',
'runtime/runtime-strings.cc',
'runtime/runtime-symbol.cc',
'runtime/runtime-test.cc',
@@ -1293,12 +1308,14 @@
'wasm/decoder.h',
'wasm/function-body-decoder.cc',
'wasm/function-body-decoder.h',
+ 'wasm/function-body-decoder-impl.h',
'wasm/leb-helper.h',
- 'wasm/managed.h',
'wasm/module-decoder.cc',
'wasm/module-decoder.h',
'wasm/signature-map.cc',
'wasm/signature-map.h',
+ 'wasm/wasm-code-specialization.h',
+ 'wasm/wasm-code-specialization.cc',
'wasm/wasm-debug.cc',
'wasm/wasm-external-refs.cc',
'wasm/wasm-external-refs.h',
@@ -1741,7 +1758,7 @@
# When building Official, the .lib is too large and exceeds the 2G
# limit. This breaks it into multiple pieces to avoid the limit.
# See http://crbug.com/485155.
- 'msvs_shard': 10,
+ 'msvs_shard': 4,
}],
['component=="shared_library"', {
'defines': [
@@ -2259,7 +2276,6 @@
'js/templates.js',
'js/spread.js',
'js/proxy.js',
- 'js/async-await.js',
'js/harmony-string-padding.js',
'debug/mirrors.js',
'debug/debug.js',
@@ -2269,7 +2285,6 @@
'js/macros.py',
'messages.h',
'js/harmony-atomics.js',
- 'js/harmony-simd.js',
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
@@ -2278,10 +2293,6 @@
'conditions': [
['v8_enable_i18n_support==1', {
'library_files': ['js/i18n.js'],
- 'experimental_library_files': [
- 'js/datetime-format-to-parts.js',
- 'js/icu-case-mapping.js',
- ],
}],
],
},
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 585ef0f59c..7b17275b01 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -157,8 +157,9 @@ ValueSerializer::ValueSerializer(Isolate* isolate,
: isolate_(isolate),
delegate_(delegate),
zone_(isolate->allocator(), ZONE_NAME),
- id_map_(isolate->heap(), &zone_),
- array_buffer_transfer_map_(isolate->heap(), &zone_) {}
+ id_map_(isolate->heap(), ZoneAllocationPolicy(&zone_)),
+ array_buffer_transfer_map_(isolate->heap(),
+ ZoneAllocationPolicy(&zone_)) {}
ValueSerializer::~ValueSerializer() {
if (buffer_) {
@@ -1177,8 +1178,9 @@ MaybeHandle<String> ValueDeserializer::ReadUtf8String() {
if (!ReadVarint<uint32_t>().To(&utf8_length) ||
utf8_length >
static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
- !ReadRawBytes(utf8_length).To(&utf8_bytes))
+ !ReadRawBytes(utf8_length).To(&utf8_bytes)) {
return MaybeHandle<String>();
+ }
return isolate_->factory()->NewStringFromUtf8(
Vector<const char>::cast(utf8_bytes), pretenure_);
}
@@ -1201,16 +1203,20 @@ MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
if (!ReadVarint<uint32_t>().To(&byte_length) ||
byte_length >
static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
- byte_length % sizeof(uc16) != 0 || !ReadRawBytes(byte_length).To(&bytes))
+ byte_length % sizeof(uc16) != 0 ||
+ !ReadRawBytes(byte_length).To(&bytes)) {
return MaybeHandle<String>();
+ }
// Allocate an uninitialized string so that we can do a raw memcpy into the
// string on the heap (regardless of alignment).
+ if (byte_length == 0) return isolate_->factory()->empty_string();
Handle<SeqTwoByteString> string;
if (!isolate_->factory()
->NewRawTwoByteString(byte_length / sizeof(uc16), pretenure_)
- .ToHandle(&string))
+ .ToHandle(&string)) {
return MaybeHandle<String>();
+ }
// Copy the bytes directly into the new string.
// Warning: this uses host endianness.
@@ -1630,10 +1636,8 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
MaybeHandle<JSObject> result;
{
wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
- result = wasm::CreateModuleObjectFromBytes(
- isolate_, wire_bytes.begin(), wire_bytes.end(), &thrower,
- wasm::ModuleOrigin::kWasmOrigin, Handle<Script>::null(),
- Vector<const byte>::empty());
+ result = wasm::SyncCompile(isolate_, &thrower,
+ wasm::ModuleWireBytes(wire_bytes));
}
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSObject);
return result;
@@ -1742,8 +1746,8 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
->NowContains(value)) {
Handle<FieldType> value_type =
value->OptimalType(isolate_, expected_representation);
- Map::GeneralizeField(target, descriptor, expected_representation,
- value_type);
+ Map::GeneralizeField(target, descriptor, details.constness(),
+ expected_representation, value_type);
}
DCHECK(target->instance_descriptors()
->GetFieldType(descriptor)
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index 4f3fb2cf00..7961b2ea0b 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -157,11 +157,11 @@ class ValueSerializer {
// To avoid extra lookups in the identity map, ID+1 is actually stored in the
// map (checking if the used identity is zero is the fast way of checking if
// the entry is new).
- IdentityMap<uint32_t> id_map_;
+ IdentityMap<uint32_t, ZoneAllocationPolicy> id_map_;
uint32_t next_id_ = 0;
// A similar map, for transferred array buffers.
- IdentityMap<uint32_t> array_buffer_transfer_map_;
+ IdentityMap<uint32_t, ZoneAllocationPolicy> array_buffer_transfer_map_;
DISALLOW_COPY_AND_ASSIGN(ValueSerializer);
};
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index afe8701779..bfd14366e1 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -382,8 +382,10 @@ class Decoder {
int length = static_cast<int>(pc_ - pos);
if (pc_ == end && (b & 0x80)) {
+ TRACE("\n");
error(pc_ - 1, "varint too large");
} else if (length == 0) {
+ TRACE("\n");
error(pc_, "varint of length 0");
} else if (is_signed) {
if (length < kMaxLength) {
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
new file mode 100644
index 0000000000..6759ed6f2a
--- /dev/null
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -0,0 +1,325 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_FUNCTION_BODY_DECODER_IMPL_H_
+#define V8_WASM_FUNCTION_BODY_DECODER_IMPL_H_
+
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+struct WasmGlobal;
+
+// Helpers for decoding different kinds of operands which follow bytecodes.
+struct LocalIndexOperand {
+ uint32_t index;
+ ValueType type;
+ unsigned length;
+
+ inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
+ index = decoder->checked_read_u32v(pc, 1, &length, "local index");
+ type = kWasmStmt;
+ }
+};
+
+struct ImmI32Operand {
+ int32_t value;
+ unsigned length;
+ inline ImmI32Operand(Decoder* decoder, const byte* pc) {
+ value = decoder->checked_read_i32v(pc, 1, &length, "immi32");
+ }
+};
+
+struct ImmI64Operand {
+ int64_t value;
+ unsigned length;
+ inline ImmI64Operand(Decoder* decoder, const byte* pc) {
+ value = decoder->checked_read_i64v(pc, 1, &length, "immi64");
+ }
+};
+
+struct ImmF32Operand {
+ float value;
+ unsigned length;
+ inline ImmF32Operand(Decoder* decoder, const byte* pc) {
+ // Avoid bit_cast because it might not preserve the signalling bit of a NaN.
+ uint32_t tmp = decoder->checked_read_u32(pc, 1, "immf32");
+ memcpy(&value, &tmp, sizeof(value));
+ length = 4;
+ }
+};
+
+struct ImmF64Operand {
+ double value;
+ unsigned length;
+ inline ImmF64Operand(Decoder* decoder, const byte* pc) {
+ // Avoid bit_cast because it might not preserve the signalling bit of a NaN.
+ uint64_t tmp = decoder->checked_read_u64(pc, 1, "immf64");
+ memcpy(&value, &tmp, sizeof(value));
+ length = 8;
+ }
+};
+
+struct GlobalIndexOperand {
+ uint32_t index;
+ ValueType type;
+ const WasmGlobal* global;
+ unsigned length;
+
+ inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
+ index = decoder->checked_read_u32v(pc, 1, &length, "global index");
+ global = nullptr;
+ type = kWasmStmt;
+ }
+};
+
+struct BlockTypeOperand {
+ uint32_t arity;
+ const byte* types; // pointer to encoded types for the block.
+ unsigned length;
+
+ inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
+ uint8_t val = decoder->checked_read_u8(pc, 1, "block type");
+ ValueType type = kWasmStmt;
+ length = 1;
+ arity = 0;
+ types = nullptr;
+ if (decode_local_type(val, &type)) {
+ arity = type == kWasmStmt ? 0 : 1;
+ types = pc + 1;
+ } else {
+ // Handle multi-value blocks.
+ if (!FLAG_wasm_mv_prototype) {
+ decoder->error(pc, pc + 1, "invalid block arity > 1");
+ return;
+ }
+ if (val != kMultivalBlock) {
+ decoder->error(pc, pc + 1, "invalid block type");
+ return;
+ }
+ // Decode and check the types vector of the block.
+ unsigned len = 0;
+ uint32_t count = decoder->checked_read_u32v(pc, 2, &len, "block arity");
+ // {count} is encoded as {arity-2}, so that a {0} count here corresponds
+ // to a block with 2 values. This makes invalid/redundant encodings
+ // impossible.
+ arity = count + 2;
+ length = 1 + len + arity;
+ types = pc + 1 + 1 + len;
+
+ for (uint32_t i = 0; i < arity; i++) {
+ uint32_t offset = 1 + 1 + len + i;
+ val = decoder->checked_read_u8(pc, offset, "block type");
+ decode_local_type(val, &type);
+ if (type == kWasmStmt) {
+ decoder->error(pc, pc + offset, "invalid block type");
+ return;
+ }
+ }
+ }
+ }
+ // Decode a byte representing a local type. Return {false} if the encoded
+ // byte was invalid or {kMultivalBlock}.
+ bool decode_local_type(uint8_t val, ValueType* result) {
+ switch (static_cast<ValueTypeCode>(val)) {
+ case kLocalVoid:
+ *result = kWasmStmt;
+ return true;
+ case kLocalI32:
+ *result = kWasmI32;
+ return true;
+ case kLocalI64:
+ *result = kWasmI64;
+ return true;
+ case kLocalF32:
+ *result = kWasmF32;
+ return true;
+ case kLocalF64:
+ *result = kWasmF64;
+ return true;
+ case kLocalS128:
+ *result = kWasmS128;
+ return true;
+ case kLocalS1x4:
+ *result = kWasmS1x4;
+ return true;
+ case kLocalS1x8:
+ *result = kWasmS1x8;
+ return true;
+ case kLocalS1x16:
+ *result = kWasmS1x16;
+ return true;
+ default:
+ *result = kWasmStmt;
+ return false;
+ }
+ }
+ ValueType read_entry(unsigned index) {
+ DCHECK_LT(index, arity);
+ ValueType result;
+ CHECK(decode_local_type(types[index], &result));
+ return result;
+ }
+};
+
+struct Control;
+struct BreakDepthOperand {
+ uint32_t depth;
+ Control* target;
+ unsigned length;
+ inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
+ depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
+ target = nullptr;
+ }
+};
+
+struct CallIndirectOperand {
+ uint32_t table_index;
+ uint32_t index;
+ FunctionSig* sig;
+ unsigned length;
+ inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
+ unsigned len = 0;
+ index = decoder->checked_read_u32v(pc, 1, &len, "signature index");
+ table_index = decoder->checked_read_u8(pc, 1 + len, "table index");
+ if (table_index != 0) {
+ decoder->error(pc, pc + 1 + len, "expected table index 0, found %u",
+ table_index);
+ }
+ length = 1 + len;
+ sig = nullptr;
+ }
+};
+
+struct CallFunctionOperand {
+ uint32_t index;
+ FunctionSig* sig;
+ unsigned length;
+ inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
+ unsigned len1 = 0;
+ unsigned len2 = 0;
+ index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "function index");
+ length = len1 + len2;
+ sig = nullptr;
+ }
+};
+
+struct MemoryIndexOperand {
+ uint32_t index;
+ unsigned length;
+ inline MemoryIndexOperand(Decoder* decoder, const byte* pc) {
+ index = decoder->checked_read_u8(pc, 1, "memory index");
+ if (index != 0) {
+ decoder->error(pc, pc + 1, "expected memory index 0, found %u", index);
+ }
+ length = 1;
+ }
+};
+
+struct BranchTableOperand {
+ uint32_t table_count;
+ const byte* start;
+ const byte* table;
+ inline BranchTableOperand(Decoder* decoder, const byte* pc) {
+ DCHECK_EQ(kExprBrTable, decoder->checked_read_u8(pc, 0, "opcode"));
+ start = pc + 1;
+ unsigned len1 = 0;
+ table_count = decoder->checked_read_u32v(pc, 1, &len1, "table count");
+ if (table_count > (UINT_MAX / sizeof(uint32_t)) - 1 ||
+ len1 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
+ decoder->error(pc, "branch table size overflow");
+ }
+ table = pc + 1 + len1;
+ }
+};
+
+// A helper to iterate over a branch table.
+class BranchTableIterator {
+ public:
+ unsigned cur_index() { return index_; }
+ bool has_next() { return decoder_->ok() && index_ <= table_count_; }
+ uint32_t next() {
+ DCHECK(has_next());
+ index_++;
+ unsigned length = 0;
+ uint32_t result =
+ decoder_->checked_read_u32v(pc_, 0, &length, "branch table entry");
+ pc_ += length;
+ return result;
+ }
+ // length, including the length of the {BranchTableOperand}, but not the
+ // opcode.
+ unsigned length() {
+ while (has_next()) next();
+ return static_cast<unsigned>(pc_ - start_);
+ }
+ const byte* pc() { return pc_; }
+
+ BranchTableIterator(Decoder* decoder, BranchTableOperand& operand)
+ : decoder_(decoder),
+ start_(operand.start),
+ pc_(operand.table),
+ index_(0),
+ table_count_(operand.table_count) {}
+
+ private:
+ Decoder* decoder_;
+ const byte* start_;
+ const byte* pc_;
+ uint32_t index_; // the current index.
+ uint32_t table_count_; // the count of entries, not including default.
+};
+
+struct MemoryAccessOperand {
+ uint32_t alignment;
+ uint32_t offset;
+ unsigned length;
+ inline MemoryAccessOperand(Decoder* decoder, const byte* pc,
+ uint32_t max_alignment) {
+ unsigned alignment_length;
+ alignment =
+ decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
+ if (max_alignment < alignment) {
+ decoder->error(pc, pc + 1,
+ "invalid alignment; expected maximum alignment is %u, "
+ "actual alignment is %u",
+ max_alignment, alignment);
+ }
+ unsigned offset_length;
+ offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
+ &offset_length, "offset");
+ length = alignment_length + offset_length;
+ }
+};
+
+// Operand for SIMD lane operations.
+struct SimdLaneOperand {
+ uint8_t lane;
+ unsigned length;
+
+ inline SimdLaneOperand(Decoder* decoder, const byte* pc) {
+ lane = decoder->checked_read_u8(pc, 2, "lane");
+ length = 1;
+ }
+};
+
+// Operand for SIMD shift operations.
+struct SimdShiftOperand {
+ uint8_t shift;
+ unsigned length;
+
+ inline SimdShiftOperand(Decoder* decoder, const byte* pc) {
+ shift = decoder->checked_read_u8(pc, 2, "shift");
+ length = 1;
+ }
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_FUNCTION_BODY_DECODER_IMPL_H_
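BranchTableIterator above walks a br_table lazily, decoding one LEB128 varint per next() call; because has_next() tests index_ <= table_count_, iteration yields the table entries followed by the trailing default target. A self-contained illustration of that walk; the minimal varint reader below is a stand-in for V8's Decoder, not its API:

#include <cstdint>
#include <cstdio>
#include <vector>

static uint32_t ReadU32LEB(const uint8_t*& pc) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t b;
  do {
    b = *pc++;
    result |= static_cast<uint32_t>(b & 0x7f) << shift;
    shift += 7;
  } while (b & 0x80);
  return result;
}

int main() {
  // br_table payload: count = 2, targets {0, 1}, then the default target 2.
  std::vector<uint8_t> payload = {0x02, 0x00, 0x01, 0x02};
  const uint8_t* pc = payload.data();
  uint32_t count = ReadU32LEB(pc);
  for (uint32_t index = 0; index <= count; ++index) {  // <=: default included
    std::printf("entry %u -> depth %u\n", index, ReadU32LEB(pc));
  }
  return 0;
}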
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 04a2806237..dc2f83b459 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -4,13 +4,17 @@
#include "src/signature.h"
+#include "src/base/platform/elapsed-timer.h"
#include "src/bit-vector.h"
#include "src/flags.h"
#include "src/handles.h"
+#include "src/objects-inl.h"
#include "src/zone/zone-containers.h"
#include "src/wasm/decoder.h"
+#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -149,16 +153,6 @@ struct Control {
(build() ? CheckForException(builder_->func(__VA_ARGS__)) : nullptr)
#define BUILD0(func) (build() ? CheckForException(builder_->func()) : nullptr)
-struct LaneOperand {
- uint8_t lane;
- unsigned length;
-
- inline LaneOperand(Decoder* decoder, const byte* pc) {
- lane = decoder->checked_read_u8(pc, 2, "lane");
- length = 1;
- }
-};
-
// Generic Wasm bytecode decoder with utilities for decoding operands,
// lengths, etc.
class WasmDecoder : public Decoder {
@@ -197,7 +191,7 @@ class WasmDecoder : public Decoder {
uint32_t count = decoder->consume_u32v("local count");
if (decoder->failed()) return false;
- if ((count + type_list->size()) > kMaxNumWasmLocals) {
+ if ((count + type_list->size()) > kV8MaxWasmFunctionLocals) {
decoder->error(decoder->pc() - 1, "local count too large");
return false;
}
@@ -221,6 +215,15 @@ class WasmDecoder : public Decoder {
case kLocalS128:
type = kWasmS128;
break;
+ case kLocalS1x4:
+ type = kWasmS1x4;
+ break;
+ case kLocalS1x8:
+ type = kWasmS1x8;
+ break;
+ case kLocalS1x16:
+ type = kWasmS1x16;
+ break;
default:
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
@@ -349,9 +352,61 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Validate(const byte* pc, LaneOperand& operand) {
- if (operand.lane < 0 || operand.lane > 3) {
- error(pc_, pc_ + 2, "invalid extract lane value");
+ inline bool Validate(const byte* pc, WasmOpcode opcode,
+ SimdLaneOperand& operand) {
+ uint8_t num_lanes = 0;
+ switch (opcode) {
+ case kExprF32x4ExtractLane:
+ case kExprF32x4ReplaceLane:
+ case kExprI32x4ExtractLane:
+ case kExprI32x4ReplaceLane:
+ num_lanes = 4;
+ break;
+ case kExprI16x8ExtractLane:
+ case kExprI16x8ReplaceLane:
+ num_lanes = 8;
+ break;
+ case kExprI8x16ExtractLane:
+ case kExprI8x16ReplaceLane:
+ num_lanes = 16;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (operand.lane < 0 || operand.lane >= num_lanes) {
+ error(pc_, pc_ + 2, "invalid lane index");
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ inline bool Validate(const byte* pc, WasmOpcode opcode,
+ SimdShiftOperand& operand) {
+ uint8_t max_shift = 0;
+ switch (opcode) {
+ case kExprI32x4Shl:
+ case kExprI32x4ShrS:
+ case kExprI32x4ShrU:
+ max_shift = 32;
+ break;
+ case kExprI16x8Shl:
+ case kExprI16x8ShrS:
+ case kExprI16x8ShrU:
+ max_shift = 16;
+ break;
+ case kExprI8x16Shl:
+ case kExprI8x16ShrS:
+ case kExprI8x16ShrU:
+ max_shift = 8;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (operand.shift < 0 || operand.shift >= max_shift) {
+ error(pc_, pc_ + 2, "invalid shift amount");
return false;
} else {
return true;
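The new validation admits a shift amount only when it is strictly below the lane width in bits of the vector shape (I32x4 -> 32, I16x8 -> 16, I8x16 -> 8). Restated outside the decoder as a sketch:

#include <cassert>
#include <cstdint>

// A shift is valid iff it is less than the lane width in bits.
inline bool IsValidSimdShift(uint8_t lane_width_bits, uint8_t shift) {
  return shift < lane_width_bits;
}

int main() {
  assert(IsValidSimdShift(16, 15));   // e.g. kExprI16x8Shl with shift 15
  assert(!IsValidSimdShift(16, 16));  // shift == lane width is rejected
}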
@@ -658,10 +713,12 @@ class WasmFullDecoder : public WasmDecoder {
while (pc_ < end_) { // decoding loop.
unsigned len = 1;
WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
- if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
- TRACE(" @%-8d #%02x:%-20s|", startrel(pc_), opcode,
- WasmOpcodes::ShortOpcodeName(opcode));
+#if DEBUG
+ if (FLAG_trace_wasm_decoder && !WasmOpcodes::IsPrefixOpcode(opcode)) {
+ TRACE(" @%-8d #%-20s|", startrel(pc_),
+ WasmOpcodes::OpcodeName(opcode));
}
+#endif
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig) {
@@ -847,7 +904,7 @@ class WasmFullDecoder : public WasmDecoder {
last_end_found_ = true;
if (ssa_env_->go()) {
// The result of the block is the return value.
- TRACE(" @%-8d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
+ TRACE(" @%-8d #xx:%-20s|", startrel(pc_), "(implicit) return");
DoReturn();
TRACE("\n");
} else {
@@ -932,8 +989,7 @@ class WasmFullDecoder : public WasmDecoder {
if (i == 0) {
merge = &c->merge;
} else if (merge->arity != c->merge.arity) {
- error(pos, pos,
- "inconsistent arity in br_table target %d"
+ error(pos, pos, "inconsistent arity in br_table target %d"
" (previous was %u, this one %u)",
i, merge->arity, c->merge.arity);
} else if (control_.back().unreachable) {
@@ -941,8 +997,8 @@ class WasmFullDecoder : public WasmDecoder {
if ((*merge)[j].type != c->merge[j].type) {
error(pos, pos,
"type error in br_table target %d operand %d"
- " (previous expected %s, this one %s)",
- i, j, WasmOpcodes::TypeName((*merge)[j].type),
+ " (previous expected %s, this one %s)", i, j,
+ WasmOpcodes::TypeName((*merge)[j].type),
WasmOpcodes::TypeName(c->merge[j].type));
}
}
@@ -1174,8 +1230,8 @@ class WasmFullDecoder : public WasmDecoder {
len++;
byte simd_index = checked_read_u8(pc_, 1, "simd index");
opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
- TRACE(" @%-4d #%02x #%02x:%-20s|", startrel(pc_), kSimdPrefix,
- simd_index, WasmOpcodes::ShortOpcodeName(opcode));
+ TRACE(" @%-4d #%-20s|", startrel(pc_),
+ WasmOpcodes::OpcodeName(opcode));
len += DecodeSimdOpcode(opcode);
break;
}
@@ -1251,7 +1307,7 @@ class WasmFullDecoder : public WasmDecoder {
}
PrintF(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
static_cast<int>(val.pc - start_),
- WasmOpcodes::ShortOpcodeName(opcode));
+ WasmOpcodes::OpcodeName(opcode));
switch (opcode) {
case kExprI32Const: {
ImmI32Operand operand(this, val.pc);
@@ -1370,9 +1426,9 @@ class WasmFullDecoder : public WasmDecoder {
return 1 + operand.length;
}
- unsigned ExtractLane(WasmOpcode opcode, ValueType type) {
- LaneOperand operand(this, pc_);
- if (Validate(pc_, operand)) {
+ unsigned SimdExtractLane(WasmOpcode opcode, ValueType type) {
+ SimdLaneOperand operand(this, pc_);
+ if (Validate(pc_, opcode, operand)) {
compiler::NodeVector inputs(1, zone_);
inputs[0] = Pop(0, ValueType::kSimd128).node;
TFNode* node = BUILD(SimdLaneOp, opcode, operand.lane, inputs);
@@ -1381,9 +1437,9 @@ class WasmFullDecoder : public WasmDecoder {
return operand.length;
}
- unsigned ReplaceLane(WasmOpcode opcode, ValueType type) {
- LaneOperand operand(this, pc_);
- if (Validate(pc_, operand)) {
+ unsigned SimdReplaceLane(WasmOpcode opcode, ValueType type) {
+ SimdLaneOperand operand(this, pc_);
+ if (Validate(pc_, opcode, operand)) {
compiler::NodeVector inputs(2, zone_);
inputs[1] = Pop(1, type).node;
inputs[0] = Pop(0, ValueType::kSimd128).node;
@@ -1393,23 +1449,50 @@ class WasmFullDecoder : public WasmDecoder {
return operand.length;
}
+ unsigned SimdShiftOp(WasmOpcode opcode) {
+ SimdShiftOperand operand(this, pc_);
+ if (Validate(pc_, opcode, operand)) {
+ compiler::NodeVector inputs(1, zone_);
+ inputs[0] = Pop(0, ValueType::kSimd128).node;
+ TFNode* node = BUILD(SimdShiftOp, opcode, operand.shift, inputs);
+ Push(ValueType::kSimd128, node);
+ }
+ return operand.length;
+ }
+
unsigned DecodeSimdOpcode(WasmOpcode opcode) {
unsigned len = 0;
switch (opcode) {
- case kExprI32x4ExtractLane: {
- len = ExtractLane(opcode, ValueType::kWord32);
- break;
- }
case kExprF32x4ExtractLane: {
- len = ExtractLane(opcode, ValueType::kFloat32);
+ len = SimdExtractLane(opcode, ValueType::kFloat32);
break;
}
- case kExprI32x4ReplaceLane: {
- len = ReplaceLane(opcode, ValueType::kWord32);
+ case kExprI32x4ExtractLane:
+ case kExprI16x8ExtractLane:
+ case kExprI8x16ExtractLane: {
+ len = SimdExtractLane(opcode, ValueType::kWord32);
break;
}
case kExprF32x4ReplaceLane: {
- len = ReplaceLane(opcode, ValueType::kFloat32);
+ len = SimdReplaceLane(opcode, ValueType::kFloat32);
+ break;
+ }
+ case kExprI32x4ReplaceLane:
+ case kExprI16x8ReplaceLane:
+ case kExprI8x16ReplaceLane: {
+ len = SimdReplaceLane(opcode, ValueType::kWord32);
+ break;
+ }
+ case kExprI32x4Shl:
+ case kExprI32x4ShrS:
+ case kExprI32x4ShrU:
+ case kExprI16x8Shl:
+ case kExprI16x8ShrS:
+ case kExprI16x8ShrU:
+ case kExprI8x16Shl:
+ case kExprI8x16ShrS:
+ case kExprI8x16ShrU: {
+ len = SimdShiftOp(opcode);
break;
}
default: {
@@ -1475,7 +1558,7 @@ class WasmFullDecoder : public WasmDecoder {
const char* SafeOpcodeNameAt(const byte* pc) {
if (pc >= end_) return "<end>";
- return WasmOpcodes::ShortOpcodeName(static_cast<WasmOpcode>(*pc));
+ return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(*pc));
}
Value Pop(int index, ValueType expected) {
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 1115b1a450..6e6b824727 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -26,295 +26,10 @@ class WasmGraphBuilder;
namespace wasm {
-const uint32_t kMaxNumWasmLocals = 8000000;
-struct WasmGlobal;
-
-// Helpers for decoding different kinds of operands which follow bytecodes.
-struct LocalIndexOperand {
- uint32_t index;
- ValueType type;
- unsigned length;
-
- inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->checked_read_u32v(pc, 1, &length, "local index");
- type = kWasmStmt;
- }
-};
-
-struct ImmI8Operand {
- int8_t value;
- unsigned length;
- inline ImmI8Operand(Decoder* decoder, const byte* pc) {
- value = bit_cast<int8_t>(decoder->checked_read_u8(pc, 1, "immi8"));
- length = 1;
- }
-};
-
-struct ImmI32Operand {
- int32_t value;
- unsigned length;
- inline ImmI32Operand(Decoder* decoder, const byte* pc) {
- value = decoder->checked_read_i32v(pc, 1, &length, "immi32");
- }
-};
-
-struct ImmI64Operand {
- int64_t value;
- unsigned length;
- inline ImmI64Operand(Decoder* decoder, const byte* pc) {
- value = decoder->checked_read_i64v(pc, 1, &length, "immi64");
- }
-};
-
-struct ImmF32Operand {
- float value;
- unsigned length;
- inline ImmF32Operand(Decoder* decoder, const byte* pc) {
- // Avoid bit_cast because it might not preserve the signalling bit of a NaN.
- uint32_t tmp = decoder->checked_read_u32(pc, 1, "immf32");
- memcpy(&value, &tmp, sizeof(value));
- length = 4;
- }
-};
-
-struct ImmF64Operand {
- double value;
- unsigned length;
- inline ImmF64Operand(Decoder* decoder, const byte* pc) {
- // Avoid bit_cast because it might not preserve the signalling bit of a NaN.
- uint64_t tmp = decoder->checked_read_u64(pc, 1, "immf64");
- memcpy(&value, &tmp, sizeof(value));
- length = 8;
- }
-};
-
-struct GlobalIndexOperand {
- uint32_t index;
- ValueType type;
- const WasmGlobal* global;
- unsigned length;
-
- inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->checked_read_u32v(pc, 1, &length, "global index");
- global = nullptr;
- type = kWasmStmt;
- }
-};
-
-struct BlockTypeOperand {
- uint32_t arity;
- const byte* types; // pointer to encoded types for the block.
- unsigned length;
-
- inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
- uint8_t val = decoder->checked_read_u8(pc, 1, "block type");
- ValueType type = kWasmStmt;
- length = 1;
- arity = 0;
- types = nullptr;
- if (decode_local_type(val, &type)) {
- arity = type == kWasmStmt ? 0 : 1;
- types = pc + 1;
- } else {
- // Handle multi-value blocks.
- if (!FLAG_wasm_mv_prototype) {
- decoder->error(pc, pc + 1, "invalid block arity > 1");
- return;
- }
- if (val != kMultivalBlock) {
- decoder->error(pc, pc + 1, "invalid block type");
- return;
- }
- // Decode and check the types vector of the block.
- unsigned len = 0;
- uint32_t count = decoder->checked_read_u32v(pc, 2, &len, "block arity");
- // {count} is encoded as {arity-2}, so that a {0} count here corresponds
- // to a block with 2 values. This makes invalid/redundant encodings
- // impossible.
- arity = count + 2;
- length = 1 + len + arity;
- types = pc + 1 + 1 + len;
-
- for (uint32_t i = 0; i < arity; i++) {
- uint32_t offset = 1 + 1 + len + i;
- val = decoder->checked_read_u8(pc, offset, "block type");
- decode_local_type(val, &type);
- if (type == kWasmStmt) {
- decoder->error(pc, pc + offset, "invalid block type");
- return;
- }
- }
- }
- }
- // Decode a byte representing a local type. Return {false} if the encoded
- // byte was invalid or {kMultivalBlock}.
- bool decode_local_type(uint8_t val, ValueType* result) {
- switch (static_cast<ValueTypeCode>(val)) {
- case kLocalVoid:
- *result = kWasmStmt;
- return true;
- case kLocalI32:
- *result = kWasmI32;
- return true;
- case kLocalI64:
- *result = kWasmI64;
- return true;
- case kLocalF32:
- *result = kWasmF32;
- return true;
- case kLocalF64:
- *result = kWasmF64;
- return true;
- case kLocalS128:
- *result = kWasmS128;
- return true;
- default:
- *result = kWasmStmt;
- return false;
- }
- }
- ValueType read_entry(unsigned index) {
- DCHECK_LT(index, arity);
- ValueType result;
- CHECK(decode_local_type(types[index], &result));
- return result;
- }
-};
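The {arity-2} bias above deserves a concrete example. Under this scheme a block yielding (i32, f64) encodes as the marker byte, a biased LEB128 count, then one type byte per result. The constants are the ones named in this patch (from wasm-opcodes.h) and the array is purely illustrative:

    #include <cstdint>

    // Hypothetical wire bytes for a block with results (i32, f64) under the
    // multi-value prototype: marker, biased LEB128 count, one byte per type.
    const uint8_t kBlockTypeI32F64[] = {kMultivalBlock,
                                        /* count = arity - 2 */ 0,
                                        kLocalI32, kLocalF64};
    // BlockTypeOperand would decode arity == 2 and length == 4 from this.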
-
-struct Control;
-struct BreakDepthOperand {
- uint32_t depth;
- Control* target;
- unsigned length;
- inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
- depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
- target = nullptr;
- }
-};
-
-struct CallIndirectOperand {
- uint32_t table_index;
- uint32_t index;
- FunctionSig* sig;
- unsigned length;
- inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
- unsigned len = 0;
- index = decoder->checked_read_u32v(pc, 1, &len, "signature index");
- table_index = decoder->checked_read_u8(pc, 1 + len, "table index");
- if (table_index != 0) {
- decoder->error(pc, pc + 1 + len, "expected table index 0, found %u",
- table_index);
- }
- length = 1 + len;
- sig = nullptr;
- }
-};
-
-struct CallFunctionOperand {
- uint32_t index;
- FunctionSig* sig;
- unsigned length;
- inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
- unsigned len1 = 0;
- unsigned len2 = 0;
- index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "function index");
- length = len1 + len2;
- sig = nullptr;
- }
-};
-
-struct MemoryIndexOperand {
- uint32_t index;
- unsigned length;
- inline MemoryIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->checked_read_u8(pc, 1, "memory index");
- if (index != 0) {
- decoder->error(pc, pc + 1, "expected memory index 0, found %u", index);
- }
- length = 1;
- }
-};
-
-struct BranchTableOperand {
- uint32_t table_count;
- const byte* start;
- const byte* table;
- inline BranchTableOperand(Decoder* decoder, const byte* pc) {
- DCHECK_EQ(kExprBrTable, decoder->checked_read_u8(pc, 0, "opcode"));
- start = pc + 1;
- unsigned len1 = 0;
- table_count = decoder->checked_read_u32v(pc, 1, &len1, "table count");
- if (table_count > (UINT_MAX / sizeof(uint32_t)) - 1 ||
- len1 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
- decoder->error(pc, "branch table size overflow");
- }
- table = pc + 1 + len1;
- }
-};
-
-// A helper to iterate over a branch table.
-class BranchTableIterator {
- public:
- unsigned cur_index() { return index_; }
- bool has_next() { return decoder_->ok() && index_ <= table_count_; }
- uint32_t next() {
- DCHECK(has_next());
- index_++;
- unsigned length = 0;
- uint32_t result =
- decoder_->checked_read_u32v(pc_, 0, &length, "branch table entry");
- pc_ += length;
- return result;
- }
- // length, including the length of the {BranchTableOperand}, but not the
- // opcode.
- unsigned length() {
- while (has_next()) next();
- return static_cast<unsigned>(pc_ - start_);
- }
- const byte* pc() { return pc_; }
-
- BranchTableIterator(Decoder* decoder, BranchTableOperand& operand)
- : decoder_(decoder),
- start_(operand.start),
- pc_(operand.table),
- index_(0),
- table_count_(operand.table_count) {}
-
- private:
- Decoder* decoder_;
- const byte* start_;
- const byte* pc_;
- uint32_t index_; // the current index.
- uint32_t table_count_; // the count of entries, not including default.
-};
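The wire layout the iterator walks is a LEB128 entry count followed by count + 1 LEB128 branch depths, the trailing one being the default target. A minimal standalone sketch of the same walk, with a hypothetical ReadU32v helper in place of the Decoder class and none of its error handling (malformed over-long encodings are not rejected here):

    #include <cstdint>
    #include <vector>

    // Reads one unsigned LEB128 value and advances *pc past it.
    uint32_t ReadU32v(const uint8_t** pc) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t b;
      do {
        b = *(*pc)++;
        result |= static_cast<uint32_t>(b & 0x7f) << shift;
        shift += 7;
      } while (b & 0x80);
      return result;
    }

    // Collects all branch depths of a br_table payload, default target last.
    std::vector<uint32_t> ReadBranchTable(const uint8_t* pc) {
      uint32_t count = ReadU32v(&pc);
      std::vector<uint32_t> depths;
      for (uint32_t i = 0; i <= count; ++i) depths.push_back(ReadU32v(&pc));
      return depths;
    }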
-
-struct MemoryAccessOperand {
- uint32_t alignment;
- uint32_t offset;
- unsigned length;
- inline MemoryAccessOperand(Decoder* decoder, const byte* pc,
- uint32_t max_alignment) {
- unsigned alignment_length;
- alignment =
- decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
- if (max_alignment < alignment) {
- decoder->error(pc, pc + 1,
- "invalid alignment; expected maximum alignment is %u, "
- "actual alignment is %u",
- max_alignment, alignment);
- }
- unsigned offset_length;
- offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
- &offset_length, "offset");
- length = alignment_length + offset_length;
- }
-};
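This is the standard wasm memarg immediate: a LEB128 alignment exponent followed by a LEB128 byte offset. As a concrete illustration (0x28 is the MVP opcode for i32.load):

    #include <cstdint>

    // i32.load align=4 offset=8: opcode, log2(alignment), offset.
    const uint8_t kI32LoadEncoding[] = {0x28, /* log2(4) */ 0x02,
                                        /* offset */ 0x08};
    // MemoryAccessOperand would decode alignment == 2, offset == 8, and an
    // operand length of 2 (the opcode byte itself is not counted).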
-
typedef compiler::WasmGraphBuilder TFBuilder;
struct WasmModule; // forward declaration of module interface.
-// All of the various data structures necessary to decode a function body.
+// A wrapper around the signature and bytes of a function.
struct FunctionBody {
FunctionSig* sig; // function signature
const byte* base; // base of the module bytes, for error reporting
diff --git a/deps/v8/src/wasm/managed.h b/deps/v8/src/wasm/managed.h
deleted file mode 100644
index 785d5d32c0..0000000000
--- a/deps/v8/src/wasm/managed.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_MANAGED_H_
-#define V8_WASM_MANAGED_H_
-
-#include "src/factory.h"
-#include "src/global-handles.h"
-#include "src/handles.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-// An object that wraps a pointer to a C++ object and optionally deletes it
-// when the managed wrapper object is garbage collected.
-template <class CppType>
-class Managed : public Foreign {
- public:
- V8_INLINE CppType* get() {
- return reinterpret_cast<CppType*>(foreign_address());
- }
-
- static Handle<Managed<CppType>> New(Isolate* isolate, CppType* ptr,
- bool delete_on_gc = true) {
- Handle<Foreign> foreign =
- isolate->factory()->NewForeign(reinterpret_cast<Address>(ptr));
- Handle<Managed<CppType>> handle(
- reinterpret_cast<Managed<CppType>*>(*foreign), isolate);
- if (delete_on_gc) {
- RegisterWeakCallbackForDelete(isolate, handle);
- }
- return handle;
- }
-
- private:
- static void RegisterWeakCallbackForDelete(Isolate* isolate,
- Handle<Managed<CppType>> handle) {
- Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
- GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
- &Managed<CppType>::Delete,
- v8::WeakCallbackType::kFinalizer);
- }
- static void Delete(const v8::WeakCallbackInfo<void>& data) {
- Managed<CppType>** p =
- reinterpret_cast<Managed<CppType>**>(data.GetParameter());
- delete (*p)->get();
- (*p)->set_foreign_address(0);
- GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
- }
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_MANAGED_H_
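Managed<CppType> is still used below (Managed<InterpreterHandle> in wasm-debug.cc), so this deletion relocates the header rather than retiring the pattern. The underlying idea, tying a C++ object's lifetime to a GC'd wrapper through a weak callback, can be sketched against the public embedder API alone; all names here are illustrative, not V8 internals:

    #include <v8.h>

    // Arranges for `delete ptr` when `wrapper` becomes unreachable.
    template <class CppType>
    struct ManagedState {
      v8::Global<v8::External> global;
      CppType* ptr;
    };

    template <class CppType>
    void DeleteOnGC(v8::Isolate* isolate, v8::Local<v8::External> wrapper,
                    CppType* ptr) {
      auto* state = new ManagedState<CppType>{
          v8::Global<v8::External>(isolate, wrapper), ptr};
      state->global.SetWeak(
          state,
          [](const v8::WeakCallbackInfo<ManagedState<CppType>>& info) {
            ManagedState<CppType>* s = info.GetParameter();
            delete s->ptr;      // free the wrapped C++ object
            s->global.Reset();  // weak handles must be reset in the callback
            delete s;
          },
          v8::WeakCallbackType::kParameter);
    }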
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 056fc2f64d..440e5dcbb9 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -3,12 +3,15 @@
// found in the LICENSE file.
#include "src/wasm/module-decoder.h"
+#include "src/wasm/function-body-decoder-impl.h"
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
+#include "src/counters.h"
#include "src/flags.h"
#include "src/macro-assembler.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
+#include "src/ostreams.h"
#include "src/v8.h"
#include "src/wasm/decoder.h"
@@ -27,6 +30,39 @@ namespace wasm {
#define TRACE(...)
#endif
+const char* SectionName(WasmSectionCode code) {
+ switch (code) {
+ case kUnknownSectionCode:
+ return "Unknown";
+ case kTypeSectionCode:
+ return "Type";
+ case kImportSectionCode:
+ return "Import";
+ case kFunctionSectionCode:
+ return "Function";
+ case kTableSectionCode:
+ return "Table";
+ case kMemorySectionCode:
+ return "Memory";
+ case kGlobalSectionCode:
+ return "Global";
+ case kExportSectionCode:
+ return "Export";
+ case kStartSectionCode:
+ return "Start";
+ case kCodeSectionCode:
+ return "Code";
+ case kElementSectionCode:
+ return "Element";
+ case kDataSectionCode:
+ return "Data";
+ case kNameSectionCode:
+ return "Name";
+ default:
+ return "<unknown>";
+ }
+}
+
namespace {
const char* kNameString = "name";
@@ -178,7 +214,9 @@ class ModuleDecoder : public Decoder {
public:
ModuleDecoder(Zone* zone, const byte* module_start, const byte* module_end,
ModuleOrigin origin)
- : Decoder(module_start, module_end), module_zone(zone), origin_(origin) {
+ : Decoder(module_start, module_end),
+ module_zone(zone),
+ origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {
result_.start = start_;
if (end_ < start_) {
error(start_, "end is less than start");
@@ -313,8 +351,8 @@ class ModuleDecoder : public Decoder {
expect_u8("element type", kWasmAnyFunctionTypeForm);
WasmIndirectFunctionTable* table = &module->function_tables.back();
consume_resizable_limits("element count", "elements",
- kV8MaxWasmTableSize, &table->min_size,
- &table->has_max, kV8MaxWasmTableSize,
+ FLAG_wasm_max_table_size, &table->min_size,
+ &table->has_max, FLAG_wasm_max_table_size,
&table->max_size);
break;
}
@@ -322,7 +360,7 @@ class ModuleDecoder : public Decoder {
// ===== Imported memory =========================================
if (!AddMemory(module)) break;
consume_resizable_limits(
- "memory", "pages", kV8MaxWasmMemoryPages,
+ "memory", "pages", FLAG_wasm_max_mem_pages,
&module->min_mem_pages, &module->has_max_mem,
kSpecMaxWasmMemoryPages, &module->max_mem_pages);
break;
@@ -381,9 +419,10 @@ class ModuleDecoder : public Decoder {
false, false, SignatureMap()});
WasmIndirectFunctionTable* table = &module->function_tables.back();
expect_u8("table type", kWasmAnyFunctionTypeForm);
- consume_resizable_limits(
- "table elements", "elements", kV8MaxWasmTableSize, &table->min_size,
- &table->has_max, kV8MaxWasmTableSize, &table->max_size);
+ consume_resizable_limits("table elements", "elements",
+ FLAG_wasm_max_table_size, &table->min_size,
+ &table->has_max, FLAG_wasm_max_table_size,
+ &table->max_size);
}
section_iter.advance();
}
@@ -394,7 +433,7 @@ class ModuleDecoder : public Decoder {
for (uint32_t i = 0; ok() && i < memory_count; i++) {
if (!AddMemory(module)) break;
- consume_resizable_limits("memory", "pages", kV8MaxWasmMemoryPages,
+ consume_resizable_limits("memory", "pages", FLAG_wasm_max_mem_pages,
&module->min_mem_pages, &module->has_max_mem,
kSpecMaxWasmMemoryPages,
&module->max_mem_pages);
@@ -526,7 +565,7 @@ class ModuleDecoder : public Decoder {
// ===== Elements section ================================================
if (section_iter.section_code() == kElementSectionCode) {
uint32_t element_count =
- consume_count("element count", kV8MaxWasmTableSize);
+ consume_count("element count", FLAG_wasm_max_table_size);
for (uint32_t i = 0; ok() && i < element_count; ++i) {
const byte* pos = pc();
uint32_t table_index = consume_u32v("table index");
@@ -625,7 +664,7 @@ class ModuleDecoder : public Decoder {
}
uint32_t local_names_count = inner.consume_u32v("local names count");
- for (uint32_t j = 0; ok() && j < local_names_count; j++) {
+ for (uint32_t j = 0; inner.ok() && j < local_names_count; j++) {
uint32_t length = inner.consume_u32v("string length");
inner.consume_bytes(length, "string");
}
@@ -787,21 +826,22 @@ class ModuleDecoder : public Decoder {
// Verifies the body (code) of a given function.
void VerifyFunctionBody(uint32_t func_num, ModuleBytesEnv* menv,
WasmFunction* function) {
+ WasmFunctionName func_name(function,
+ menv->wire_bytes.GetNameOrNull(function));
if (FLAG_trace_wasm_decoder || FLAG_trace_wasm_decode_time) {
OFStream os(stdout);
- os << "Verifying WASM function " << WasmFunctionName(function, menv)
- << std::endl;
+ os << "Verifying WASM function " << func_name << std::endl;
}
FunctionBody body = {function->sig, start_,
start_ + function->code_start_offset,
start_ + function->code_end_offset};
- DecodeResult result =
- VerifyWasmCode(module_zone->allocator(),
- menv == nullptr ? nullptr : menv->module, body);
+ DecodeResult result = VerifyWasmCode(
+ module_zone->allocator(),
+ menv == nullptr ? nullptr : menv->module_env.module, body);
if (result.failed()) {
// Wrap the error message from the function decoder.
std::ostringstream str;
- str << "in function " << WasmFunctionName(function, menv) << ": ";
+ str << "in function " << func_name << ": ";
str << result;
std::string strval = str.str();
const char* raw = strval.c_str();
@@ -1024,14 +1064,21 @@ class ModuleDecoder : public Decoder {
return kWasmF32;
case kLocalF64:
return kWasmF64;
- case kLocalS128:
+ default:
if (origin_ != kAsmJsOrigin && FLAG_wasm_simd_prototype) {
- return kWasmS128;
- } else {
- error(pc_ - 1, "invalid local type");
- return kWasmStmt;
+ switch (t) {
+ case kLocalS128:
+ return kWasmS128;
+ case kLocalS1x4:
+ return kWasmS1x4;
+ case kLocalS1x8:
+ return kWasmS1x8;
+ case kLocalS1x16:
+ return kWasmS1x16;
+ default:
+ break;
+ }
}
- default:
error(pc_ - 1, "invalid local type");
return kWasmStmt;
}
@@ -1207,7 +1254,7 @@ FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
uint32_t size = decoder.consume_u32v("body size");
int offset = static_cast<int>(section_offset + decoder.pc_offset());
- table.push_back(std::make_pair(offset, static_cast<int>(size)));
+ table.emplace_back(offset, static_cast<int>(size));
DCHECK(table.back().first >= 0 && table.back().second >= 0);
decoder.consume_bytes(size);
}
@@ -1230,7 +1277,7 @@ AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
uint32_t size = decoder.consume_u32v("table size");
if (size == 0) {
- table.push_back(std::vector<AsmJsOffsetEntry>());
+ table.emplace_back();
continue;
}
if (!decoder.checkAvailable(size)) {
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 982fbc9189..446883fd6b 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -14,6 +14,34 @@ namespace v8 {
namespace internal {
namespace wasm {
+const uint32_t kWasmMagic = 0x6d736100;
+const uint32_t kWasmVersion = 0x01;
+const uint8_t kWasmFunctionTypeForm = 0x60;
+const uint8_t kWasmAnyFunctionTypeForm = 0x70;
+const uint8_t kResizableMaximumFlag = 1;
+
+enum WasmSectionCode {
+ kUnknownSectionCode = 0, // code for unknown sections
+ kTypeSectionCode = 1, // Function signature declarations
+ kImportSectionCode = 2, // Import declarations
+ kFunctionSectionCode = 3, // Function declarations
+ kTableSectionCode = 4, // Indirect function table and other tables
+ kMemorySectionCode = 5, // Memory attributes
+ kGlobalSectionCode = 6, // Global declarations
+ kExportSectionCode = 7, // Exports
+ kStartSectionCode = 8, // Start function declaration
+ kElementSectionCode = 9, // Elements section
+ kCodeSectionCode = 10, // Function code
+ kDataSectionCode = 11, // Data segments
+ kNameSectionCode = 12, // Name section (encoded as a string)
+};
+
+inline bool IsValidSectionCode(uint8_t byte) {
+ return kTypeSectionCode <= byte && byte <= kDataSectionCode;
+}
+
+const char* SectionName(WasmSectionCode code);
+
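Together these constants pin down the module framing: a 4-byte magic ('\0asm', 0x6d736100 read little-endian), a 4-byte version, then a sequence of sections, each a one-byte code plus a LEB128 payload length. A hedged sketch of a section scan built on the declarations above; ReadU32 (fixed-width little-endian) and ReadU32v (LEB128) are assumed helpers, not decoder API:

    #include <cstdint>
    #include <cstdio>

    // Prints the name and payload size of every section in a module.
    void DumpSections(const uint8_t* pc, const uint8_t* end) {
      if (ReadU32(&pc) != kWasmMagic || ReadU32(&pc) != kWasmVersion) return;
      while (pc < end) {
        uint8_t code = *pc++;                     // one-byte section code
        uint32_t payload_length = ReadU32v(&pc);  // LEB128 payload size
        if (!IsValidSectionCode(code) && code != kUnknownSectionCode) return;
        std::printf("%s section, %u payload bytes\n",
                    SectionName(static_cast<WasmSectionCode>(code)),
                    payload_length);
        pc += payload_length;  // skip over the payload itself
      }
    }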
typedef Result<const WasmModule*> ModuleResult;
typedef Result<WasmFunction*> FunctionResult;
typedef std::vector<std::pair<int, int>> FunctionOffsets;
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
new file mode 100644
index 0000000000..1147899ef5
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -0,0 +1,263 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-code-specialization.h"
+
+#include "src/assembler-inl.h"
+#include "src/objects-inl.h"
+#include "src/source-position-table.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
+namespace {
+
+int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
+ DCHECK_EQ(static_cast<int>(kExprCallFunction), static_cast<int>(*pc));
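+  // A LEB128-encoded u32 occupies at most 5 bytes, so pc + 6 bounds the
+  // operand of the call opcode at pc.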
+ decoder.Reset(pc + 1, pc + 6);
+ uint32_t call_idx = decoder.consume_u32v("call index");
+ DCHECK(decoder.ok());
+ DCHECK_GE(kMaxInt, call_idx);
+ return static_cast<int>(call_idx);
+}
+
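+// Returns the source (byte) offset of the last source-position entry whose
+// code offset is at or before {offset_l}, advancing the iterator past it.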
+int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
+ size_t offset_l) {
+ DCHECK_GE(kMaxInt, offset_l);
+ int offset = static_cast<int>(offset_l);
+ DCHECK(!iterator.done());
+ int byte_pos;
+ do {
+ byte_pos = iterator.source_position().ScriptOffset();
+ iterator.Advance();
+ } while (!iterator.done() && iterator.code_offset() <= offset);
+ return byte_pos;
+}
+
+class PatchDirectCallsHelper {
+ public:
+ PatchDirectCallsHelper(WasmInstanceObject* instance, Code* code)
+ : source_pos_it(code->source_position_table()),
+ decoder(nullptr, nullptr) {
+ FixedArray* deopt_data = code->deoptimization_data();
+ DCHECK_EQ(2, deopt_data->length());
+ WasmCompiledModule* comp_mod = instance->compiled_module();
+ int func_index = Smi::cast(deopt_data->get(1))->value();
+ func_bytes = comp_mod->module_bytes()->GetChars() +
+ comp_mod->module()->functions[func_index].code_start_offset;
+ }
+
+ SourcePositionTableIterator source_pos_it;
+ Decoder decoder;
+ const byte* func_bytes;
+};
+
+} // namespace
+
+CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone)
+ : objects_to_relocate(isolate->heap(), ZoneAllocationPolicy(zone)) {}
+
+CodeSpecialization::~CodeSpecialization() {}
+
+void CodeSpecialization::RelocateMemoryReferences(Address old_start,
+ uint32_t old_size,
+ Address new_start,
+ uint32_t new_size) {
+ DCHECK(old_mem_start == nullptr && old_mem_size == 0 &&
+ new_mem_start == nullptr && new_mem_size == 0);
+ DCHECK(old_start != new_start || old_size != new_size);
+ old_mem_start = old_start;
+ old_mem_size = old_size;
+ new_mem_start = new_start;
+ new_mem_size = new_size;
+}
+
+void CodeSpecialization::RelocateGlobals(Address old_start, Address new_start) {
+ DCHECK(old_globals_start == 0 && new_globals_start == 0);
+ DCHECK(old_start != 0 || new_start != 0);
+ old_globals_start = old_start;
+ new_globals_start = new_start;
+}
+
+void CodeSpecialization::PatchTableSize(uint32_t old_size, uint32_t new_size) {
+ DCHECK(old_function_table_size == 0 && new_function_table_size == 0);
+ DCHECK(old_size != 0 || new_size != 0);
+ old_function_table_size = old_size;
+ new_function_table_size = new_size;
+}
+
+void CodeSpecialization::RelocateDirectCalls(
+ Handle<WasmInstanceObject> instance) {
+ DCHECK(relocate_direct_calls_instance.is_null());
+ DCHECK(!instance.is_null());
+ relocate_direct_calls_instance = instance;
+}
+
+void CodeSpecialization::RelocateObject(Handle<Object> old_obj,
+ Handle<Object> new_obj) {
+ DCHECK(!old_obj.is_null() && !new_obj.is_null());
+ has_objects_to_relocate = true;
+ objects_to_relocate.Set(*old_obj, new_obj);
+}
+
+bool CodeSpecialization::ApplyToWholeInstance(
+ WasmInstanceObject* instance, ICacheFlushMode icache_flush_mode) {
+ DisallowHeapAllocation no_gc;
+ WasmCompiledModule* compiled_module = instance->compiled_module();
+ FixedArray* code_table = compiled_module->ptr_to_code_table();
+ WasmModule* module = compiled_module->module();
+ std::vector<WasmFunction>* wasm_functions =
+ &compiled_module->module()->functions;
+ DCHECK_EQ(wasm_functions->size() +
+ compiled_module->module()->num_exported_functions,
+ code_table->length());
+
+ bool changed = false;
+ int func_index = module->num_imported_functions;
+
+ // Patch all wasm functions.
+ for (int num_wasm_functions = static_cast<int>(wasm_functions->size());
+ func_index < num_wasm_functions; ++func_index) {
+ Code* wasm_function = Code::cast(code_table->get(func_index));
+ changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
+ }
+
+ // Patch all exported functions.
+ for (auto exp : module->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Code* export_wrapper = Code::cast(code_table->get(func_index));
+ DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+ // There must be exactly one call to WASM_FUNCTION or WASM_TO_JS_FUNCTION.
+ int num_wasm_calls = 0;
+ for (RelocIterator it(export_wrapper,
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET));
+ !it.done(); it.next()) {
+ DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
+ Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ // Ignore calls to other builtins like ToNumber.
+ if (code->kind() != Code::WASM_FUNCTION &&
+ code->kind() != Code::WASM_TO_JS_FUNCTION &&
+ code->builtin_index() != Builtins::kIllegal)
+ continue;
+ ++num_wasm_calls;
+ Code* new_code = Code::cast(code_table->get(exp.index));
+ DCHECK(new_code->kind() == Code::WASM_FUNCTION ||
+ new_code->kind() == Code::WASM_TO_JS_FUNCTION);
+ it.rinfo()->set_target_address(new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ changed = true;
+ }
+ DCHECK_EQ(1, num_wasm_calls);
+ func_index++;
+ }
+ DCHECK_EQ(code_table->length(), func_index);
+ return changed;
+}
+
+bool CodeSpecialization::ApplyToWasmCode(Code* code,
+ ICacheFlushMode icache_flush_mode) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_EQ(Code::WASM_FUNCTION, code->kind());
+
+ bool reloc_mem_addr = old_mem_start != new_mem_start;
+ bool reloc_mem_size = old_mem_size != new_mem_size;
+ bool reloc_globals = old_globals_start || new_globals_start;
+ bool patch_table_size = old_function_table_size || new_function_table_size;
+ bool reloc_direct_calls = !relocate_direct_calls_instance.is_null();
+ bool reloc_objects = has_objects_to_relocate;
+
+ int reloc_mode = 0;
+ auto add_mode = [&reloc_mode](bool cond, RelocInfo::Mode mode) {
+ if (cond) reloc_mode |= RelocInfo::ModeMask(mode);
+ };
+ add_mode(reloc_mem_addr, RelocInfo::WASM_MEMORY_REFERENCE);
+ add_mode(reloc_mem_size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ add_mode(reloc_globals, RelocInfo::WASM_GLOBAL_REFERENCE);
+ add_mode(patch_table_size, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
+ add_mode(reloc_direct_calls, RelocInfo::CODE_TARGET);
+ add_mode(reloc_objects, RelocInfo::EMBEDDED_OBJECT);
+
+ std::unique_ptr<PatchDirectCallsHelper> patch_direct_calls_helper;
+ bool changed = false;
+
+ for (RelocIterator it(code, reloc_mode); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ switch (mode) {
+ case RelocInfo::WASM_MEMORY_REFERENCE:
+ DCHECK(reloc_mem_addr);
+ it.rinfo()->update_wasm_memory_reference(old_mem_start, new_mem_start,
+ icache_flush_mode);
+ changed = true;
+ break;
+ case RelocInfo::WASM_MEMORY_SIZE_REFERENCE:
+ DCHECK(reloc_mem_size);
+ it.rinfo()->update_wasm_memory_size(old_mem_size, new_mem_size,
+ icache_flush_mode);
+ changed = true;
+ break;
+ case RelocInfo::WASM_GLOBAL_REFERENCE:
+ DCHECK(reloc_globals);
+ it.rinfo()->update_wasm_global_reference(
+ old_globals_start, new_globals_start, icache_flush_mode);
+ changed = true;
+ break;
+ case RelocInfo::CODE_TARGET: {
+ DCHECK(reloc_direct_calls);
+ Code* old_code =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+        // Skip everything that is not a wasm call (stack checks, traps, ...).
+ if (old_code->kind() != Code::WASM_FUNCTION &&
+ old_code->kind() != Code::WASM_TO_JS_FUNCTION &&
+ old_code->builtin_index() != Builtins::kIllegal)
+ continue;
+ // Iterate simultaneously over the relocation information and the source
+ // position table. For each call in the reloc info, move the source
+ // position iterator forward to that position to find the byte offset of
+ // the respective call. Then extract the call index from the module wire
+ // bytes to find the new compiled function.
+ size_t offset = it.rinfo()->pc() - code->instruction_start();
+ if (!patch_direct_calls_helper) {
+ patch_direct_calls_helper.reset(new PatchDirectCallsHelper(
+ *relocate_direct_calls_instance, code));
+ }
+ int byte_pos = AdvanceSourcePositionTableIterator(
+ patch_direct_calls_helper->source_pos_it, offset);
+ int called_func_index = ExtractDirectCallIndex(
+ patch_direct_calls_helper->decoder,
+ patch_direct_calls_helper->func_bytes + byte_pos);
+ FixedArray* code_table =
+ relocate_direct_calls_instance->compiled_module()
+ ->ptr_to_code_table();
+ Code* new_code = Code::cast(code_table->get(called_func_index));
+ it.rinfo()->set_target_address(new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER, icache_flush_mode);
+ changed = true;
+ } break;
+ case RelocInfo::EMBEDDED_OBJECT: {
+ DCHECK(reloc_objects);
+ Object* old = it.rinfo()->target_object();
+ Handle<Object>* new_obj = objects_to_relocate.Find(old);
+ if (new_obj) {
+ it.rinfo()->set_target_object(**new_obj, UPDATE_WRITE_BARRIER,
+ icache_flush_mode);
+ changed = true;
+ }
+ } break;
+ case RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE:
+ DCHECK(patch_table_size);
+ it.rinfo()->update_wasm_function_table_size_reference(
+ old_function_table_size, new_function_table_size,
+ icache_flush_mode);
+ changed = true;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ return changed;
+}
diff --git a/deps/v8/src/wasm/wasm-code-specialization.h b/deps/v8/src/wasm/wasm-code-specialization.h
new file mode 100644
index 0000000000..fa54235ec3
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-code-specialization.h
@@ -0,0 +1,70 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_CODE_SPECIALIZATION_H_
+#define V8_WASM_CODE_SPECIALIZATION_H_
+
+#include "src/assembler.h"
+#include "src/identity-map.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Helper class to specialize wasm code for a specific instance, or to update
+// code when memory / globals / tables change.
+// This class is unhandlified, and contains a DisallowHeapAllocation field to
+// ensure that no allocations happen while it is alive.
+//
+// Set up all relocations / patching that should be performed by the Relocate* /
+// Patch* methods, then apply all changes in one step using the Apply* methods.
+class CodeSpecialization {
+ public:
+ CodeSpecialization(Isolate*, Zone*);
+ ~CodeSpecialization();
+
+ // Update memory references.
+ void RelocateMemoryReferences(Address old_start, uint32_t old_size,
+ Address new_start, uint32_t new_size);
+ // Update references to global variables.
+ void RelocateGlobals(Address old_start, Address new_start);
+ // Update function table size.
+ // TODO(wasm): Prepare this for more than one indirect function table.
+ void PatchTableSize(uint32_t old_size, uint32_t new_size);
+ // Update all direct call sites based on the code table in the given instance.
+ void RelocateDirectCalls(Handle<WasmInstanceObject> instance);
+ // Relocate an arbitrary object (e.g. function table).
+ void RelocateObject(Handle<Object> old_obj, Handle<Object> new_obj);
+
+ // Apply all relocations and patching to all code in the instance (wasm code
+ // and exported functions).
+ bool ApplyToWholeInstance(WasmInstanceObject*,
+ ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
+ // Apply all relocations and patching to one wasm code object.
+ bool ApplyToWasmCode(Code*, ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
+
+ private:
+ Address old_mem_start = 0;
+ uint32_t old_mem_size = 0;
+ Address new_mem_start = 0;
+ uint32_t new_mem_size = 0;
+
+ Address old_globals_start = 0;
+ Address new_globals_start = 0;
+
+ uint32_t old_function_table_size = 0;
+ uint32_t new_function_table_size = 0;
+
+ Handle<WasmInstanceObject> relocate_direct_calls_instance;
+
+ bool has_objects_to_relocate = false;
+ IdentityMap<Handle<Object>, ZoneAllocationPolicy> objects_to_relocate;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_CODE_SPECIALIZATION_H_
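A hedged usage sketch of the protocol this header documents, for the memory-growth case; the isolate, zone, instance, and old/new bounds are assumed from context rather than taken from any specific call site:

    #include "src/wasm/wasm-code-specialization.h"

    namespace v8 {
    namespace internal {
    namespace wasm {

    // After growing memory: retarget every memory reference in one pass.
    void PatchAfterGrow(Isolate* isolate, Zone* zone,
                        Handle<WasmInstanceObject> instance,
                        Address old_start, uint32_t old_size,
                        Address new_start, uint32_t new_size) {
      CodeSpecialization spec(isolate, zone);
      spec.RelocateMemoryReferences(old_start, old_size, new_start, new_size);
      // Patches wasm functions and export wrappers, flushing icache as needed.
      spec.ApplyToWholeInstance(*instance);
    }

    }  // namespace wasm
    }  // namespace internal
    }  // namespace v8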
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 6cb865d59c..769f57d951 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -21,27 +21,34 @@ using namespace v8::internal::wasm;
namespace {
+// Forward declaration.
+class InterpreterHandle;
+InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info);
+
class InterpreterHandle {
AccountingAllocator allocator_;
WasmInstance instance_;
WasmInterpreter interpreter_;
+ Isolate* isolate_;
+ StepAction next_step_action_ = StepNone;
+ int last_step_stack_depth_ = 0;
public:
// Initialize in the right order, using helper methods to make this possible.
// WasmInterpreter has to be allocated in place, since it is not movable.
InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
: instance_(debug_info->wasm_instance()->compiled_module()->module()),
- interpreter_(GetBytesEnv(&instance_, debug_info), &allocator_) {
- Handle<JSArrayBuffer> mem_buffer =
- handle(debug_info->wasm_instance()->memory_buffer(), isolate);
- if (mem_buffer->IsUndefined(isolate)) {
- DCHECK_EQ(0, instance_.module->min_mem_pages);
- instance_.mem_start = nullptr;
- instance_.mem_size = 0;
- } else {
+ interpreter_(GetBytesEnv(&instance_, debug_info), &allocator_),
+ isolate_(isolate) {
+ if (debug_info->wasm_instance()->has_memory_buffer()) {
+ JSArrayBuffer* mem_buffer = debug_info->wasm_instance()->memory_buffer();
instance_.mem_start =
reinterpret_cast<byte*>(mem_buffer->backing_store());
CHECK(mem_buffer->byte_length()->ToUint32(&instance_.mem_size));
+ } else {
+ DCHECK_EQ(0, instance_.module->min_mem_pages);
+ instance_.mem_start = nullptr;
+ instance_.mem_size = 0;
}
}
@@ -58,6 +65,18 @@ class InterpreterHandle {
WasmInterpreter* interpreter() { return &interpreter_; }
const WasmModule* module() { return instance_.module; }
+ void PrepareStep(StepAction step_action) {
+ next_step_action_ = step_action;
+ last_step_stack_depth_ = CurrentStackDepth();
+ }
+
+ void ClearStepping() { next_step_action_ = StepNone; }
+
+ int CurrentStackDepth() {
+ DCHECK_EQ(1, interpreter()->GetThreadCount());
+ return interpreter()->GetThread(0)->GetFrameCount();
+ }
+
void Execute(uint32_t func_index, uint8_t* arg_buffer) {
DCHECK_GE(module()->functions.size(), func_index);
FunctionSig* sig = module()->functions[func_index].sig;
@@ -66,7 +85,7 @@ class InterpreterHandle {
ScopedVector<WasmVal> wasm_args(num_params);
uint8_t* arg_buf_ptr = arg_buffer;
for (int i = 0; i < num_params; ++i) {
- uint32_t param_size = 1 << ElementSizeLog2Of(sig->GetParam(i));
+ int param_size = 1 << ElementSizeLog2Of(sig->GetParam(i));
#define CASE_ARG_TYPE(type, ctype) \
case type: \
DCHECK_EQ(param_size, sizeof(ctype)); \
@@ -81,7 +100,7 @@ class InterpreterHandle {
default:
UNREACHABLE();
}
- arg_buf_ptr += param_size;
+ arg_buf_ptr += RoundUpToMultipleOfPowOf2(param_size, 8);
}
WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
@@ -91,16 +110,17 @@ class InterpreterHandle {
thread->state() == WasmInterpreter::FINISHED);
thread->Reset();
thread->PushFrame(&module()->functions[func_index], wasm_args.start());
- WasmInterpreter::State state;
- do {
- state = thread->Run();
+ bool finished = false;
+ while (!finished) {
+ // TODO(clemensh): Add occasional StackChecks.
+ WasmInterpreter::State state = ContinueExecution(thread);
switch (state) {
- case WasmInterpreter::State::PAUSED: {
- // We hit a breakpoint.
- // TODO(clemensh): Handle this.
- } break;
+ case WasmInterpreter::State::PAUSED:
+ NotifyDebugEventListeners(thread);
+ break;
case WasmInterpreter::State::FINISHED:
// Perfect, just break the switch and exit the loop.
+ finished = true;
break;
case WasmInterpreter::State::TRAPPED:
// TODO(clemensh): Generate appropriate JS exception.
@@ -112,7 +132,7 @@ class InterpreterHandle {
default:
UNREACHABLE();
}
- } while (state != WasmInterpreter::State::FINISHED);
+ }
// Copy back the return value
DCHECK_GE(kV8MaxWasmFunctionReturns, sig->return_count());
@@ -136,6 +156,128 @@ class InterpreterHandle {
}
}
}
+
+ WasmInterpreter::State ContinueExecution(WasmInterpreter::Thread* thread) {
+ switch (next_step_action_) {
+ case StepNone:
+ return thread->Run();
+ case StepIn:
+ return thread->Step();
+ case StepOut:
+ thread->AddBreakFlags(WasmInterpreter::BreakFlag::AfterReturn);
+ return thread->Run();
+ case StepNext: {
+ int stack_depth = thread->GetFrameCount();
+ if (stack_depth == last_step_stack_depth_) return thread->Step();
+ thread->AddBreakFlags(stack_depth > last_step_stack_depth_
+ ? WasmInterpreter::BreakFlag::AfterReturn
+ : WasmInterpreter::BreakFlag::AfterCall);
+ return thread->Run();
+ }
+ default:
+ UNREACHABLE();
+ return WasmInterpreter::STOPPED;
+ }
+ }
+
+ Handle<WasmInstanceObject> GetInstanceObject() {
+ StackTraceFrameIterator it(isolate_);
+ WasmInterpreterEntryFrame* frame =
+ WasmInterpreterEntryFrame::cast(it.frame());
+ Handle<WasmInstanceObject> instance_obj(frame->wasm_instance(), isolate_);
+ DCHECK_EQ(this, GetInterpreterHandle(instance_obj->debug_info()));
+ return instance_obj;
+ }
+
+ void NotifyDebugEventListeners(WasmInterpreter::Thread* thread) {
+ // Enter the debugger.
+ DebugScope debug_scope(isolate_->debug());
+ if (debug_scope.failed()) return;
+
+ // Postpone interrupt during breakpoint processing.
+ PostponeInterruptsScope postpone(isolate_);
+
+ // Check whether we hit a breakpoint.
+ if (isolate_->debug()->break_points_active()) {
+ Handle<WasmCompiledModule> compiled_module(
+ GetInstanceObject()->compiled_module(), isolate_);
+ int position = GetTopPosition(compiled_module);
+ Handle<FixedArray> breakpoints;
+ if (compiled_module->CheckBreakPoints(position).ToHandle(&breakpoints)) {
+ // We hit one or several breakpoints. Clear stepping, notify the
+ // listeners and return.
+ ClearStepping();
+ Handle<Object> hit_breakpoints_js =
+ isolate_->factory()->NewJSArrayWithElements(breakpoints);
+ isolate_->debug()->OnDebugBreak(hit_breakpoints_js);
+ return;
+ }
+ }
+
+ // We did not hit a breakpoint, so maybe this pause is related to stepping.
+ bool hit_step = false;
+ switch (next_step_action_) {
+ case StepNone:
+ break;
+ case StepIn:
+ hit_step = true;
+ break;
+ case StepOut:
+ hit_step = thread->GetFrameCount() < last_step_stack_depth_;
+ break;
+ case StepNext: {
+ hit_step = thread->GetFrameCount() == last_step_stack_depth_;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ if (!hit_step) return;
+ ClearStepping();
+ isolate_->debug()->OnDebugBreak(isolate_->factory()->undefined_value());
+ }
+
+ int GetTopPosition(Handle<WasmCompiledModule> compiled_module) {
+ DCHECK_EQ(1, interpreter()->GetThreadCount());
+ WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
+ DCHECK_LT(0, thread->GetFrameCount());
+
+ wasm::InterpretedFrame frame =
+ thread->GetFrame(thread->GetFrameCount() - 1);
+ return compiled_module->GetFunctionOffset(frame.function()->func_index) +
+ frame.pc();
+ }
+
+ std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
+ Address frame_pointer) {
+ // TODO(clemensh): Use frame_pointer.
+ USE(frame_pointer);
+
+ DCHECK_EQ(1, interpreter()->GetThreadCount());
+ WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
+ std::vector<std::pair<uint32_t, int>> stack(thread->GetFrameCount());
+ for (int i = 0, e = thread->GetFrameCount(); i < e; ++i) {
+ wasm::InterpretedFrame frame = thread->GetFrame(i);
+ stack[i] = {frame.function()->func_index, frame.pc()};
+ }
+ return stack;
+ }
+
+ std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
+ Address frame_pointer, int idx) {
+ // TODO(clemensh): Use frame_pointer.
+ USE(frame_pointer);
+
+ DCHECK_EQ(1, interpreter()->GetThreadCount());
+ WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
+ return std::unique_ptr<wasm::InterpretedFrame>(
+ new wasm::InterpretedFrame(thread->GetMutableFrame(idx)));
+ }
+
+ uint64_t NumInterpretedCalls() {
+ DCHECK_EQ(1, interpreter()->GetThreadCount());
+ return interpreter()->GetThread(0)->NumInterpretedCalls();
+ }
};
InterpreterHandle* GetOrCreateInterpreterHandle(
@@ -151,6 +293,18 @@ InterpreterHandle* GetOrCreateInterpreterHandle(
return Handle<Managed<InterpreterHandle>>::cast(handle)->get();
}
+InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
+ Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandle);
+ DCHECK(!handle_obj->IsUndefined(debug_info->GetIsolate()));
+ return Managed<InterpreterHandle>::cast(handle_obj)->get();
+}
+
+InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
+ Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandle);
+ if (handle_obj->IsUndefined(debug_info->GetIsolate())) return nullptr;
+ return Managed<InterpreterHandle>::cast(handle_obj)->get();
+}
+
int GetNumFunctions(WasmInstanceObject* instance) {
size_t num_functions =
instance->compiled_module()->module()->functions.size();
@@ -202,32 +356,13 @@ void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
}
}
-void EnsureRedirectToInterpreter(Isolate* isolate,
- Handle<WasmDebugInfo> debug_info,
- int func_index) {
- Handle<FixedArray> interpreted_functions =
- GetOrCreateInterpretedFunctions(isolate, debug_info);
- if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) return;
-
- Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
- Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
- isolate, func_index,
- instance->compiled_module()->module()->functions[func_index].sig,
- instance);
-
- Handle<FixedArray> code_table = instance->compiled_module()->code_table();
- Handle<Code> old_code(Code::cast(code_table->get(func_index)), isolate);
- interpreted_functions->set(func_index, *new_code);
-
- RedirectCallsitesInInstance(isolate, *instance, *old_code, *new_code);
-}
-
} // namespace
Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
Isolate* isolate = instance->GetIsolate();
Factory* factory = isolate->factory();
Handle<FixedArray> arr = factory->NewFixedArray(kFieldCount, TENURED);
+ arr->set(kWrapperTracerHeader, Smi::kZero);
arr->set(kInstance, *instance);
return Handle<WasmDebugInfo>::cast(arr);
}
@@ -257,18 +392,57 @@ void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
int func_index, int offset) {
Isolate* isolate = debug_info->GetIsolate();
InterpreterHandle* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
- WasmInterpreter* interpreter = handle->interpreter();
- DCHECK_LE(0, func_index);
- DCHECK_GT(handle->module()->functions.size(), func_index);
+ RedirectToInterpreter(debug_info, func_index);
const WasmFunction* func = &handle->module()->functions[func_index];
- interpreter->SetBreakpoint(func, offset, true);
- EnsureRedirectToInterpreter(isolate, debug_info, func_index);
+ handle->interpreter()->SetBreakpoint(func, offset, true);
+}
+
+void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
+ int func_index) {
+ Isolate* isolate = debug_info->GetIsolate();
+ DCHECK_LE(0, func_index);
+ DCHECK_GT(debug_info->wasm_instance()->module()->functions.size(),
+ func_index);
+ Handle<FixedArray> interpreted_functions =
+ GetOrCreateInterpretedFunctions(isolate, debug_info);
+ if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) return;
+
+ // Ensure that the interpreter is instantiated.
+ GetOrCreateInterpreterHandle(isolate, debug_info);
+ Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
+ Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
+ isolate, func_index,
+ instance->compiled_module()->module()->functions[func_index].sig,
+ instance);
+
+ Handle<FixedArray> code_table = instance->compiled_module()->code_table();
+ Handle<Code> old_code(Code::cast(code_table->get(func_index)), isolate);
+ interpreted_functions->set(func_index, *new_code);
+
+ RedirectCallsitesInInstance(isolate, *instance, *old_code, *new_code);
}
-void WasmDebugInfo::RunInterpreter(Handle<WasmDebugInfo> debug_info,
- int func_index, uint8_t* arg_buffer) {
+void WasmDebugInfo::PrepareStep(StepAction step_action) {
+ GetInterpreterHandle(this)->PrepareStep(step_action);
+}
+
+void WasmDebugInfo::RunInterpreter(int func_index, uint8_t* arg_buffer) {
DCHECK_LE(0, func_index);
- InterpreterHandle* interp_handle =
- GetOrCreateInterpreterHandle(debug_info->GetIsolate(), debug_info);
- interp_handle->Execute(static_cast<uint32_t>(func_index), arg_buffer);
+ GetInterpreterHandle(this)->Execute(static_cast<uint32_t>(func_index),
+ arg_buffer);
+}
+
+std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
+ Address frame_pointer) {
+ return GetInterpreterHandle(this)->GetInterpretedStack(frame_pointer);
+}
+
+std::unique_ptr<wasm::InterpretedFrame> WasmDebugInfo::GetInterpretedFrame(
+ Address frame_pointer, int idx) {
+ return GetInterpreterHandle(this)->GetInterpretedFrame(frame_pointer, idx);
+}
+
+uint64_t WasmDebugInfo::NumInterpretedCalls() {
+ auto handle = GetInterpreterHandleOrNull(this);
+ return handle ? handle->NumInterpretedCalls() : 0;
}
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index ac125caf7e..f32b5e617b 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -2,10 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <type_traits>
+
#include "src/wasm/wasm-interpreter.h"
+#include "src/conversions.h"
+#include "src/objects-inl.h"
#include "src/utils.h"
#include "src/wasm/decoder.h"
+#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
@@ -77,12 +82,10 @@ namespace wasm {
V(F64Lt, double, <) \
V(F64Le, double, <=) \
V(F64Gt, double, >) \
- V(F64Ge, double, >=)
-
-#define FOREACH_SIMPLE_BINOP_NAN(V) \
- V(F32Mul, float, *) \
- V(F64Mul, double, *) \
- V(F32Div, float, /) \
+ V(F64Ge, double, >=) \
+ V(F32Mul, float, *) \
+ V(F64Mul, double, *) \
+ V(F32Div, float, /) \
V(F64Div, double, /)
#define FOREACH_OTHER_BINOP(V) \
@@ -106,10 +109,8 @@ namespace wasm {
V(I64Rol, int64_t) \
V(F32Min, float) \
V(F32Max, float) \
- V(F32CopySign, float) \
V(F64Min, double) \
V(F64Max, double) \
- V(F64CopySign, double) \
V(I32AsmjsDivS, int32_t) \
V(I32AsmjsDivU, uint32_t) \
V(I32AsmjsRemS, int32_t) \
@@ -162,10 +163,8 @@ namespace wasm {
V(I32AsmjsSConvertF32, float) \
V(I32AsmjsUConvertF32, float) \
V(I32AsmjsSConvertF64, double) \
- V(I32AsmjsUConvertF64, double)
-
-#define FOREACH_OTHER_UNOP_NAN(V) \
- V(F32Sqrt, float) \
+ V(I32AsmjsUConvertF64, double) \
+ V(F32Sqrt, float) \
V(F64Sqrt, double)
static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
@@ -616,7 +615,7 @@ static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
WasmInstance* instance) {
// TODO(ahaas): Move memory allocation to wasm-module.cc for better
// encapsulation.
- if (delta_pages > wasm::kV8MaxWasmMemoryPages ||
+ if (delta_pages > FLAG_wasm_max_mem_pages ||
delta_pages > instance->module->max_mem_pages) {
return -1;
}
@@ -633,7 +632,7 @@ static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
} else {
DCHECK_NOT_NULL(instance->mem_start);
new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
- if (new_size / wasm::WasmModule::kPageSize > wasm::kV8MaxWasmMemoryPages ||
+ if (new_size / wasm::WasmModule::kPageSize > FLAG_wasm_max_mem_pages ||
new_size / wasm::WasmModule::kPageSize >
instance->module->max_mem_pages) {
return -1;
@@ -916,31 +915,27 @@ class CodeMap {
}
};
+namespace {
// Responsible for executing code directly.
-class ThreadImpl : public WasmInterpreter::Thread {
+class ThreadImpl {
public:
ThreadImpl(Zone* zone, CodeMap* codemap, WasmInstance* instance)
: codemap_(codemap),
instance_(instance),
stack_(zone),
frames_(zone),
- blocks_(zone),
- state_(WasmInterpreter::STOPPED),
- break_pc_(kInvalidPc),
- trap_reason_(kTrapCount),
- possible_nondeterminism_(false) {}
-
- virtual ~ThreadImpl() {}
+ blocks_(zone) {}
//==========================================================================
// Implementation of public interface for WasmInterpreter::Thread.
//==========================================================================
- virtual WasmInterpreter::State state() { return state_; }
+ WasmInterpreter::State state() { return state_; }
- virtual void PushFrame(const WasmFunction* function, WasmVal* args) {
+ void PushFrame(const WasmFunction* function, WasmVal* args) {
InterpreterCode* code = codemap()->FindCode(function);
CHECK_NOT_NULL(code);
+ ++num_interpreted_calls_;
frames_.push_back({code, 0, 0, stack_.size()});
for (size_t i = 0; i < function->sig->parameter_count(); ++i) {
stack_.push_back(args[i]);
@@ -953,7 +948,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
frames_.back().ret_pc);
}
- virtual WasmInterpreter::State Run() {
+ WasmInterpreter::State Run() {
do {
TRACE(" => Run()\n");
if (state_ == WasmInterpreter::STOPPED ||
@@ -965,7 +960,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
return state_;
}
- virtual WasmInterpreter::State Step() {
+ WasmInterpreter::State Step() {
TRACE(" => Step()\n");
if (state_ == WasmInterpreter::STOPPED ||
state_ == WasmInterpreter::PAUSED) {
@@ -975,9 +970,9 @@ class ThreadImpl : public WasmInterpreter::Thread {
return state_;
}
- virtual void Pause() { UNIMPLEMENTED(); }
+ void Pause() { UNIMPLEMENTED(); }
- virtual void Reset() {
+ void Reset() {
TRACE("----- RESET -----\n");
stack_.clear();
frames_.clear();
@@ -986,33 +981,40 @@ class ThreadImpl : public WasmInterpreter::Thread {
possible_nondeterminism_ = false;
}
- virtual int GetFrameCount() { return static_cast<int>(frames_.size()); }
-
- virtual const WasmFrame* GetFrame(int index) {
- UNIMPLEMENTED();
- return nullptr;
+ int GetFrameCount() {
+ DCHECK_GE(kMaxInt, frames_.size());
+ return static_cast<int>(frames_.size());
}
- virtual WasmFrame* GetMutableFrame(int index) {
- UNIMPLEMENTED();
- return nullptr;
+ template <typename FrameCons>
+ InterpretedFrame GetMutableFrame(int index, FrameCons frame_cons) {
+ DCHECK_LE(0, index);
+ DCHECK_GT(frames_.size(), index);
+ Frame* frame = &frames_[index];
+ DCHECK_GE(kMaxInt, frame->ret_pc);
+ DCHECK_GE(kMaxInt, frame->sp);
+ DCHECK_GE(kMaxInt, frame->llimit());
+ return frame_cons(frame->code->function, static_cast<int>(frame->ret_pc),
+ static_cast<int>(frame->sp),
+ static_cast<int>(frame->llimit()));
}
- virtual WasmVal GetReturnValue(int index) {
+ WasmVal GetReturnValue(int index) {
if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
CHECK_EQ(WasmInterpreter::FINISHED, state_);
CHECK_LT(static_cast<size_t>(index), stack_.size());
return stack_[index];
}
- virtual pc_t GetBreakpointPc() { return break_pc_; }
+ pc_t GetBreakpointPc() { return break_pc_; }
- virtual bool PossibleNondeterminism() { return possible_nondeterminism_; }
+ bool PossibleNondeterminism() { return possible_nondeterminism_; }
- bool Terminated() {
- return state_ == WasmInterpreter::TRAPPED ||
- state_ == WasmInterpreter::FINISHED;
- }
+ uint64_t NumInterpretedCalls() { return num_interpreted_calls_; }
+
+ void AddBreakFlags(uint8_t flags) { break_flags_ |= flags; }
+
+ void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }
private:
// Entries on the stack of functions being evaluated.
@@ -1040,10 +1042,12 @@ class ThreadImpl : public WasmInterpreter::Thread {
ZoneVector<WasmVal> stack_;
ZoneVector<Frame> frames_;
ZoneVector<Block> blocks_;
- WasmInterpreter::State state_;
- pc_t break_pc_;
- TrapReason trap_reason_;
- bool possible_nondeterminism_;
+ WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
+ pc_t break_pc_ = kInvalidPc;
+ TrapReason trap_reason_ = kTrapCount;
+ bool possible_nondeterminism_ = false;
+ uint8_t break_flags_ = 0; // a combination of WasmInterpreter::BreakFlag
+ uint64_t num_interpreted_calls_ = 0;
CodeMap* codemap() { return codemap_; }
WasmInstance* instance() { return instance_; }
@@ -1059,6 +1063,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
void PushFrame(InterpreterCode* code, pc_t call_pc, pc_t ret_pc) {
CHECK_NOT_NULL(code);
DCHECK(!frames_.empty());
+ ++num_interpreted_calls_;
frames_.back().call_pc = call_pc;
frames_.back().ret_pc = ret_pc;
size_t arity = code->function->sig->parameter_count();
@@ -1175,42 +1180,73 @@ class ThreadImpl : public WasmInterpreter::Thread {
stack_.resize(stack_.size() - pop_count);
}
+ template <typename ctype, typename mtype>
+ bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len) {
+ MemoryAccessOperand operand(decoder, code->at(pc), sizeof(ctype));
+ uint32_t index = Pop().to<uint32_t>();
+ size_t effective_mem_size = instance()->mem_size - sizeof(mtype);
+ if (operand.offset > effective_mem_size ||
+ index > (effective_mem_size - operand.offset)) {
+ DoTrap(kTrapMemOutOfBounds, pc);
+ return false;
+ }
+ byte* addr = instance()->mem_start + operand.offset + index;
+ WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr)));
+
+ Push(pc, result);
+ len = 1 + operand.length;
+ return true;
+ }
+
+ template <typename ctype, typename mtype>
+ bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
+ int& len) {
+ MemoryAccessOperand operand(decoder, code->at(pc), sizeof(ctype));
+ WasmVal val = Pop();
+
+ uint32_t index = Pop().to<uint32_t>();
+ size_t effective_mem_size = instance()->mem_size - sizeof(mtype);
+ if (operand.offset > effective_mem_size ||
+ index > (effective_mem_size - operand.offset)) {
+ DoTrap(kTrapMemOutOfBounds, pc);
+ return false;
+ }
+ byte* addr = instance()->mem_start + operand.offset + index;
+ WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>()));
+ len = 1 + operand.length;
+
+ if (std::is_same<float, ctype>::value) {
+ possible_nondeterminism_ |= std::isnan(val.to<float>());
+ } else if (std::is_same<double, ctype>::value) {
+ possible_nondeterminism_ |= std::isnan(val.to<double>());
+ }
+ return true;
+ }
+
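Both helpers share the same overflow-safe bounds check: comparing offset and index separately against mem_size - sizeof(mtype) never wraps, whereas the naive index + offset + sizeof(mtype) <= mem_size can overflow in 32 bits and wrongly pass. The predicate in isolation, as a hypothetical helper:

    #include <cstddef>
    #include <cstdint>

    // True iff [index + offset, index + offset + access_size) lies inside a
    // memory of mem_size bytes, with no intermediate sum overflowing.
    bool AccessInBounds(uint32_t index, uint32_t offset, size_t access_size,
                        size_t mem_size) {
      if (access_size > mem_size) return false;  // guards the subtraction
      size_t effective = mem_size - access_size;
      return offset <= effective && index <= effective - offset;
    }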
void Execute(InterpreterCode* code, pc_t pc, int max) {
Decoder decoder(code->start, code->end);
pc_t limit = code->end - code->start;
- while (true) {
- if (max-- <= 0) {
- // Maximum number of instructions reached.
- state_ = WasmInterpreter::PAUSED;
- return CommitPc(pc);
- }
+ while (--max >= 0) {
+#define PAUSE_IF_BREAK_FLAG(flag) \
+ if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) max = 0;
- if (pc >= limit) {
- // Fell off end of code; do an implicit return.
- TRACE("@%-3zu: ImplicitReturn\n", pc);
- if (!DoReturn(&code, &pc, &limit, code->function->sig->return_count()))
- return;
- decoder.Reset(code->start, code->end);
- continue;
- }
+ DCHECK_GT(limit, pc);
const char* skip = " ";
int len = 1;
byte opcode = code->start[pc];
byte orig = opcode;
- if (opcode == kInternalBreakpoint) {
+ if (V8_UNLIKELY(opcode == kInternalBreakpoint)) {
orig = code->orig_start[pc];
if (SkipBreakpoint(code, pc)) {
// skip breakpoint by switching on original code.
skip = "[skip] ";
} else {
- state_ = WasmInterpreter::PAUSED;
TRACE("@%-3zu: [break] %-24s:", pc,
WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
TraceValueStack();
TRACE("\n");
- break_pc_ = pc;
- return CommitPc(pc);
+ break;
}
}
@@ -1300,6 +1336,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
size_t arity = code->function->sig->return_count();
if (!DoReturn(&code, &pc, &limit, arity)) return;
decoder.Reset(code->start, code->end);
+ PAUSE_IF_BREAK_FLAG(AfterReturn);
continue;
}
case kExprUnreachable: {
@@ -1365,6 +1402,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
DoCall(target, &pc, pc + 1 + operand.length, &limit);
code = target;
decoder.Reset(code->start, code->end);
+ PAUSE_IF_BREAK_FLAG(AfterCall);
continue;
}
case kExprCallIndirect: {
@@ -1391,6 +1429,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
DoCall(target, &pc, pc + 1 + operand.length, &limit);
code = target;
decoder.Reset(code->start, code->end);
+ PAUSE_IF_BREAK_FLAG(AfterCall);
continue;
}
case kExprGetGlobal: {
@@ -1435,20 +1474,10 @@ class ThreadImpl : public WasmInterpreter::Thread {
break;
}
-#define LOAD_CASE(name, ctype, mtype) \
- case kExpr##name: { \
- MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype)); \
- uint32_t index = Pop().to<uint32_t>(); \
- size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \
- if (operand.offset > effective_mem_size || \
- index > (effective_mem_size - operand.offset)) { \
- return DoTrap(kTrapMemOutOfBounds, pc); \
- } \
- byte* addr = instance()->mem_start + operand.offset + index; \
- WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr))); \
- Push(pc, result); \
- len = 1 + operand.length; \
- break; \
+#define LOAD_CASE(name, ctype, mtype) \
+ case kExpr##name: { \
+ if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len)) return; \
+ break; \
}
LOAD_CASE(I32LoadMem8S, int32_t, int8_t);
@@ -1467,20 +1496,10 @@ class ThreadImpl : public WasmInterpreter::Thread {
LOAD_CASE(F64LoadMem, double, double);
#undef LOAD_CASE
-#define STORE_CASE(name, ctype, mtype) \
- case kExpr##name: { \
- MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype)); \
- WasmVal val = Pop(); \
- uint32_t index = Pop().to<uint32_t>(); \
- size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \
- if (operand.offset > effective_mem_size || \
- index > (effective_mem_size - operand.offset)) { \
- return DoTrap(kTrapMemOutOfBounds, pc); \
- } \
- byte* addr = instance()->mem_start + operand.offset + index; \
- WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>())); \
- len = 1 + operand.length; \
- break; \
+#define STORE_CASE(name, ctype, mtype) \
+ case kExpr##name: { \
+ if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len)) return; \
+ break; \
}
STORE_CASE(I32StoreMem8, int32_t, int8_t);
@@ -1556,13 +1575,17 @@ class ThreadImpl : public WasmInterpreter::Thread {
// specially to guarantee that the quiet bit of a NaN is preserved on
// ia32 by the reinterpret casts.
case kExprI32ReinterpretF32: {
- WasmVal result(ExecuteI32ReinterpretF32(Pop()));
+ WasmVal val = Pop();
+ WasmVal result(ExecuteI32ReinterpretF32(val));
Push(pc, result);
+ possible_nondeterminism_ |= std::isnan(val.to<float>());
break;
}
case kExprI64ReinterpretF64: {
- WasmVal result(ExecuteI64ReinterpretF64(Pop()));
+ WasmVal val = Pop();
+ WasmVal result(ExecuteI64ReinterpretF64(val));
Push(pc, result);
+ possible_nondeterminism_ |= std::isnan(val.to<double>());
break;
}
#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
@@ -1576,19 +1599,6 @@ class ThreadImpl : public WasmInterpreter::Thread {
FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
#undef EXECUTE_SIMPLE_BINOP
-#define EXECUTE_SIMPLE_BINOP_NAN(name, ctype, op) \
- case kExpr##name: { \
- WasmVal rval = Pop(); \
- WasmVal lval = Pop(); \
- ctype result = lval.to<ctype>() op rval.to<ctype>(); \
- possible_nondeterminism_ |= std::isnan(result); \
- WasmVal result_val(result); \
- Push(pc, result_val); \
- break; \
- }
- FOREACH_SIMPLE_BINOP_NAN(EXECUTE_SIMPLE_BINOP_NAN)
-#undef EXECUTE_SIMPLE_BINOP_NAN
-
#define EXECUTE_OTHER_BINOP(name, ctype) \
case kExpr##name: { \
TrapReason trap = kTrapCount; \
@@ -1602,6 +1612,28 @@ class ThreadImpl : public WasmInterpreter::Thread {
FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
#undef EXECUTE_OTHER_BINOP
+ case kExprF32CopySign: {
+ // Handle kExprF32CopySign separately because it may introduce
+ // observable non-determinism.
+ TrapReason trap = kTrapCount;
+ volatile float rval = Pop().to<float>();
+ volatile float lval = Pop().to<float>();
+ WasmVal result(ExecuteF32CopySign(lval, rval, &trap));
+ Push(pc, result);
+ possible_nondeterminism_ |= std::isnan(rval);
+ break;
+ }
+ case kExprF64CopySign: {
+        // Handle kExprF64CopySign separately because it may introduce
+ // observable non-determinism.
+ TrapReason trap = kTrapCount;
+ volatile double rval = Pop().to<double>();
+ volatile double lval = Pop().to<double>();
+ WasmVal result(ExecuteF64CopySign(lval, rval, &trap));
+ Push(pc, result);
+ possible_nondeterminism_ |= std::isnan(rval);
+ break;
+ }
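The flag update above inspects only rval because that is where copysign reads the sign; a standalone illustration (not V8 code):

#include <cmath>

// Illustration only: copysign's magnitude comes from x, its sign from y.
// If y is a NaN, the sign bit of that NaN is not fixed by the wasm
// semantics, so the observable sign of the result can vary by platform.
float ObservableSign(float x, float y) {
  return std::copysign(x, y);
}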
#define EXECUTE_OTHER_UNOP(name, ctype) \
case kExpr##name: { \
TrapReason trap = kTrapCount; \
@@ -1614,20 +1646,6 @@ class ThreadImpl : public WasmInterpreter::Thread {
FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
#undef EXECUTE_OTHER_UNOP
-#define EXECUTE_OTHER_UNOP_NAN(name, ctype) \
- case kExpr##name: { \
- TrapReason trap = kTrapCount; \
- volatile ctype val = Pop().to<ctype>(); \
- ctype result = Execute##name(val, &trap); \
- possible_nondeterminism_ |= std::isnan(result); \
- WasmVal result_val(result); \
- if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(pc, result_val); \
- break; \
- }
- FOREACH_OTHER_UNOP_NAN(EXECUTE_OTHER_UNOP_NAN)
-#undef EXECUTE_OTHER_UNOP_NAN
-
default:
V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
code->start[pc], OpcodeName(code->start[pc]));
@@ -1635,8 +1653,20 @@ class ThreadImpl : public WasmInterpreter::Thread {
}
pc += len;
+ if (pc == limit) {
+ // Fell off end of code; do an implicit return.
+ TRACE("@%-3zu: ImplicitReturn\n", pc);
+ if (!DoReturn(&code, &pc, &limit, code->function->sig->return_count()))
+ return;
+ decoder.Reset(code->start, code->end);
+ PAUSE_IF_BREAK_FLAG(AfterReturn);
+ }
}
- UNREACHABLE(); // above decoding loop should run forever.
+ // Set break_pc_, even though we might have stopped because max was reached.
+ // We don't want to stop after executing zero instructions next time.
+ break_pc_ = pc;
+ state_ = WasmInterpreter::PAUSED;
+ CommitPc(pc);
}
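Reduced to a sketch (hypothetical SketchThread type; the real loop decodes full wasm opcodes), the new loop shape is:

struct SketchThread {
  size_t pc = 0;
  size_t break_pc = 0;
  bool paused = false;
  void Run(int max_instructions) {
    while (max_instructions-- > 0) {
      // ... decode and execute one instruction, updating pc; falling off
      // the end of a function body performs an implicit return ...
    }
    // Exhausting the budget is now an expected exit: remember where we
    // stopped so the next Run() resumes here instead of executing zero
    // instructions.
    break_pc = pc;
    paused = true;
  }
};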
WasmVal Pop() {
@@ -1676,6 +1706,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
}
void TraceValueStack() {
+#ifdef DEBUG
Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
sp_t sp = top ? top->sp : 0;
sp_t plimit = top ? top->plimit() : 0;
@@ -1711,9 +1742,76 @@ class ThreadImpl : public WasmInterpreter::Thread {
}
}
}
+#endif // DEBUG
}
};
+// Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
+// Thread* is the public interface, without knowledge of the object layout.
+// This cast is potentially risky, but as long as we always cast it back
+// before accessing any data, it should be fine. UBSan does not complain.
+WasmInterpreter::Thread* ToThread(ThreadImpl* impl) {
+ return reinterpret_cast<WasmInterpreter::Thread*>(impl);
+}
+static ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
+ return reinterpret_cast<ThreadImpl*>(thread);
+}
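The same trick in isolation (hypothetical Handle/Impl names; safe only because a Handle is never constructed or dereferenced as its own type):

class Handle;  // opaque public type; no object of this type ever exists

class Impl {
 public:
  int state() const { return state_; }
 private:
  int state_ = 0;
};

Handle* ToHandle(Impl* impl) { return reinterpret_cast<Handle*>(impl); }
Impl* ToImpl(Handle* handle) { return reinterpret_cast<Impl*>(handle); }

// Every public method on Handle casts back immediately, so the Impl
// pointer is the only thing ever dereferenced, e.g.:
//   int Handle::state() { return ToImpl(this)->state(); }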
+} // namespace
+
+//============================================================================
+// Implementation of the pimpl idiom for WasmInterpreter::Thread.
+// Instead of placing a pointer to the ThreadImpl inside the Thread object,
+// we just reinterpret_cast them. ThreadImpls are only allocated inside this
+// translation unit anyway.
+//============================================================================
+WasmInterpreter::State WasmInterpreter::Thread::state() {
+ return ToImpl(this)->state();
+}
+void WasmInterpreter::Thread::PushFrame(const WasmFunction* function,
+ WasmVal* args) {
+ return ToImpl(this)->PushFrame(function, args);
+}
+WasmInterpreter::State WasmInterpreter::Thread::Run() {
+ return ToImpl(this)->Run();
+}
+WasmInterpreter::State WasmInterpreter::Thread::Step() {
+ return ToImpl(this)->Step();
+}
+void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
+void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); }
+pc_t WasmInterpreter::Thread::GetBreakpointPc() {
+ return ToImpl(this)->GetBreakpointPc();
+}
+int WasmInterpreter::Thread::GetFrameCount() {
+ return ToImpl(this)->GetFrameCount();
+}
+const InterpretedFrame WasmInterpreter::Thread::GetFrame(int index) {
+ return GetMutableFrame(index);
+}
+InterpretedFrame WasmInterpreter::Thread::GetMutableFrame(int index) {
+ // We have access to the constructor of InterpretedFrame, but ThreadImpl
+ // does not. So we pass it as a lambda (it should all get inlined).
+ auto frame_cons = [](const WasmFunction* function, int pc, int fp, int sp) {
+ return InterpretedFrame(function, pc, fp, sp);
+ };
+ return ToImpl(this)->GetMutableFrame(index, frame_cons);
+}
+WasmVal WasmInterpreter::Thread::GetReturnValue(int index) {
+ return ToImpl(this)->GetReturnValue(index);
+}
+bool WasmInterpreter::Thread::PossibleNondeterminism() {
+ return ToImpl(this)->PossibleNondeterminism();
+}
+uint64_t WasmInterpreter::Thread::NumInterpretedCalls() {
+ return ToImpl(this)->NumInterpretedCalls();
+}
+void WasmInterpreter::Thread::AddBreakFlags(uint8_t flags) {
+ ToImpl(this)->AddBreakFlags(flags);
+}
+void WasmInterpreter::Thread::ClearBreakFlags() {
+ ToImpl(this)->ClearBreakFlags();
+}
+
//============================================================================
// The implementation details of the interpreter.
//============================================================================
@@ -1724,22 +1822,19 @@ class WasmInterpreterInternals : public ZoneObject {
// pointer might be invalidated after constructing the interpreter.
const ZoneVector<uint8_t> module_bytes_;
CodeMap codemap_;
- ZoneVector<ThreadImpl*> threads_;
+ ZoneVector<ThreadImpl> threads_;
WasmInterpreterInternals(Zone* zone, const ModuleBytesEnv& env)
- : instance_(env.instance),
- module_bytes_(env.module_bytes.start(), env.module_bytes.end(), zone),
- codemap_(env.instance ? env.instance->module : nullptr,
- module_bytes_.data(), zone),
+ : instance_(env.module_env.instance),
+ module_bytes_(env.wire_bytes.start(), env.wire_bytes.end(), zone),
+ codemap_(
+ env.module_env.instance ? env.module_env.instance->module : nullptr,
+ module_bytes_.data(), zone),
threads_(zone) {
- threads_.push_back(new ThreadImpl(zone, &codemap_, env.instance));
+ threads_.emplace_back(zone, &codemap_, env.module_env.instance);
}
- void Delete() {
- // TODO(titzer): CFI doesn't like threads in the ZoneVector.
- for (auto t : threads_) delete t;
- threads_.resize(0);
- }
+ void Delete() { threads_.clear(); }
};
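Storing ThreadImpl by value removes both the manual delete loop and the CFI concern noted in the old TODO; the ownership difference in a generic illustration (hypothetical Worker type, not V8 code):

#include <vector>

struct Worker {
  explicit Worker(int id) : id_(id) {}
  int id_;
};

int main() {
  // Owning by pointer requires a manual delete pass (the old Delete()):
  std::vector<Worker*> by_pointer;
  by_pointer.push_back(new Worker(0));
  for (Worker* w : by_pointer) delete w;

  // Owning by value: emplace_back constructs in place, and clear() runs
  // the destructors, which is all the new Delete() has to do.
  std::vector<Worker> by_value;
  by_value.emplace_back(0);
  by_value.clear();
}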
//============================================================================
@@ -1752,9 +1847,9 @@ WasmInterpreter::WasmInterpreter(const ModuleBytesEnv& env,
WasmInterpreter::~WasmInterpreter() { internals_->Delete(); }
-void WasmInterpreter::Run() { internals_->threads_[0]->Run(); }
+void WasmInterpreter::Run() { internals_->threads_[0].Run(); }
-void WasmInterpreter::Pause() { internals_->threads_[0]->Pause(); }
+void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
bool enabled) {
@@ -1799,30 +1894,7 @@ int WasmInterpreter::GetThreadCount() {
WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
CHECK_EQ(0, id); // only one thread for now.
- return internals_->threads_[id];
-}
-
-WasmVal WasmInterpreter::GetLocalVal(const WasmFrame* frame, int index) {
- CHECK_GE(index, 0);
- UNIMPLEMENTED();
- WasmVal none;
- none.type = kWasmStmt;
- return none;
-}
-
-WasmVal WasmInterpreter::GetExprVal(const WasmFrame* frame, int pc) {
- UNIMPLEMENTED();
- WasmVal none;
- none.type = kWasmStmt;
- return none;
-}
-
-void WasmInterpreter::SetLocalVal(WasmFrame* frame, int index, WasmVal val) {
- UNIMPLEMENTED();
-}
-
-void WasmInterpreter::SetExprVal(WasmFrame* frame, int pc, WasmVal val) {
- UNIMPLEMENTED();
+ return ToThread(&internals_->threads_[id]);
}
size_t WasmInterpreter::GetMemorySize() {
@@ -1854,6 +1926,35 @@ ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
return targets.map_;
}
+//============================================================================
+// Implementation of the frame inspection interface.
+//============================================================================
+int InterpretedFrame::GetParameterCount() const {
+ USE(fp_);
+ USE(sp_);
+ // TODO(clemensh): Return the correct number of parameters.
+ return 0;
+}
+
+WasmVal InterpretedFrame::GetLocalVal(int index) const {
+ CHECK_GE(index, 0);
+ UNIMPLEMENTED();
+ WasmVal none;
+ none.type = kWasmStmt;
+ return none;
+}
+
+WasmVal InterpretedFrame::GetExprVal(int pc) const {
+ UNIMPLEMENTED();
+ WasmVal none;
+ none.type = kWasmStmt;
+ return none;
+}
+
+void InterpretedFrame::SetLocalVal(int index, WasmVal val) { UNIMPLEMENTED(); }
+
+void InterpretedFrame::SetExprVal(int pc, WasmVal val) { UNIMPLEMENTED(); }
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index 80e6c4ba79..ab11a0883b 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -80,15 +80,24 @@ FOREACH_UNION_MEMBER(DECLARE_CAST)
#undef DECLARE_CAST
// Representation of frames within the interpreter.
-class WasmFrame {
+class InterpretedFrame {
public:
const WasmFunction* function() const { return function_; }
int pc() const { return pc_; }
+ //==========================================================================
+ // Stack frame inspection.
+ //==========================================================================
+ int GetParameterCount() const;
+ WasmVal GetLocalVal(int index) const;
+ WasmVal GetExprVal(int pc) const;
+ void SetLocalVal(int index, WasmVal val);
+ void SetExprVal(int pc, WasmVal val);
+
private:
friend class WasmInterpreter;
- WasmFrame(const WasmFunction* function, int pc, int fp, int sp)
+ InterpretedFrame(const WasmFunction* function, int pc, int fp, int sp)
: function_(function), pc_(pc), fp_(fp), sp_(sp) {}
const WasmFunction* function_;
@@ -111,32 +120,50 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// +------------- Finish -------------> FINISHED
enum State { STOPPED, RUNNING, PAUSED, FINISHED, TRAPPED };
+ // Tells a thread to pause after certain instructions.
+ enum BreakFlag : uint8_t {
+ None = 0,
+ AfterReturn = 1 << 0,
+ AfterCall = 1 << 1
+ };
+
// Representation of a thread in the interpreter.
- class Thread {
+ class V8_EXPORT_PRIVATE Thread {
+ // Don't instantiate Threads; they will be allocated as ThreadImpl in the
+ // interpreter implementation.
+ Thread() = delete;
+
public:
// Execution control.
- virtual State state() = 0;
- virtual void PushFrame(const WasmFunction* function, WasmVal* args) = 0;
- virtual State Run() = 0;
- virtual State Step() = 0;
- virtual void Pause() = 0;
- virtual void Reset() = 0;
- virtual ~Thread() {}
+ State state();
+ void PushFrame(const WasmFunction* function, WasmVal* args);
+ State Run();
+ State Step();
+ void Pause();
+ void Reset();
// Stack inspection and modification.
- virtual pc_t GetBreakpointPc() = 0;
- virtual int GetFrameCount() = 0;
- virtual const WasmFrame* GetFrame(int index) = 0;
- virtual WasmFrame* GetMutableFrame(int index) = 0;
- virtual WasmVal GetReturnValue(int index = 0) = 0;
+ pc_t GetBreakpointPc();
+ int GetFrameCount();
+ const InterpretedFrame GetFrame(int index);
+ InterpretedFrame GetMutableFrame(int index);
+ WasmVal GetReturnValue(int index = 0);
+
// Returns true if the thread executed an instruction which may produce
// nondeterministic results, e.g. float div, float sqrt, and float mul,
// where the sign bit of a NaN is nondeterministic.
- virtual bool PossibleNondeterminism() = 0;
+ bool PossibleNondeterminism();
+
+ // Returns the number of calls / function frames executed on this thread.
+ uint64_t NumInterpretedCalls();
// Thread-specific breakpoints.
- bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
- bool GetBreakpoint(const WasmFunction* function, int pc);
+ // TODO(wasm): Implement this once we support multiple threads.
+ // bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
+ // bool GetBreakpoint(const WasmFunction* function, int pc);
+
+ void AddBreakFlags(uint8_t flags);
+ void ClearBreakFlags();
};
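A sketch of how the non-virtual interface composes (assumes an existing WasmInterpreter named interpreter; not code from this patch):

WasmInterpreter::Thread* thread = interpreter.GetThread(0);
thread->AddBreakFlags(WasmInterpreter::BreakFlag::AfterReturn);
// Each Run() now pauses after a return instead of running to completion.
while (thread->Run() == WasmInterpreter::PAUSED) {
  // e.g. inspect thread->GetFrameCount() or thread->GetBreakpointPc().
}
thread->ClearBreakFlags();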
WasmInterpreter(const ModuleBytesEnv& env, AccountingAllocator* allocator);
@@ -165,14 +192,6 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
Thread* GetThread(int id);
//==========================================================================
- // Stack frame inspection.
- //==========================================================================
- WasmVal GetLocalVal(const WasmFrame* frame, int index);
- WasmVal GetExprVal(const WasmFrame* frame, int pc);
- void SetLocalVal(WasmFrame* frame, int index, WasmVal val);
- void SetExprVal(WasmFrame* frame, int pc, WasmVal val);
-
- //==========================================================================
// Memory access.
//==========================================================================
size_t GetMemorySize();
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index b426d5bf3d..281c4e82e6 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -13,6 +13,7 @@
#include "src/factory.h"
#include "src/handles.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/parsing/parse-info.h"
@@ -35,6 +36,20 @@ namespace {
"Wasm compilation exceeds internal limits in this context for the provided " \
"arguments"
+// TODO(wasm): move the brand check to the respective types, and don't throw
+// in it; rather, use a provided ErrorThrower, or let the caller handle it.
+static bool HasBrand(i::Handle<i::Object> value, i::Handle<i::Symbol> sym) {
+ if (!value->IsJSObject()) return false;
+ i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
+ Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
+ return has_brand.FromMaybe(false);
+}
+
+static bool BrandCheck(i::Handle<i::Object> value, i::Handle<i::Symbol> sym,
+ ErrorThrower* thrower, const char* msg) {
+ return HasBrand(value, sym) ? true : (thrower->TypeError("%s", msg), false);
+}
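HasBrand is the silent predicate (it also backs IsWasmMemoryObject below), while BrandCheck adds the TypeError report. The silent form is what lets instantiate dispatch on its first argument; a sketch mirroring the handler further down (first_arg and i_context are handler locals there):

if (HasBrand(first_arg, i::handle(i_context->wasm_module_sym()))) {
  // instantiate(module, imports) -> resolve with an Instance
} else {
  // instantiate(bytes, imports) -> compile first, then resolve with
  // {module, instance}
}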
+
i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
return isolate->factory()->NewStringFromAsciiChecked(str);
}
@@ -42,11 +57,26 @@ Local<String> v8_str(Isolate* isolate, const char* str) {
return Utils::ToLocal(v8_str(reinterpret_cast<i::Isolate*>(isolate), str));
}
-struct RawBuffer {
- const byte* start;
- const byte* end;
- size_t size() { return static_cast<size_t>(end - start); }
-};
+i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
+ const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
+ v8::Isolate* isolate = args.GetIsolate();
+ if (args.Length() < 1) {
+ thrower->TypeError("Argument 0 must be a WebAssembly.Module");
+ return {};
+ }
+
+ Local<Context> context = isolate->GetCurrentContext();
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+ if (!BrandCheck(Utils::OpenHandle(*args[0]),
+ i::handle(i_context->wasm_module_sym()), thrower,
+ "Argument 0 must be a WebAssembly.Module")) {
+ return {};
+ }
+
+ Local<Object> module_obj = Local<Object>::Cast(args[0]);
+ return i::Handle<i::WasmModuleObject>::cast(
+ v8::Utils::OpenHandle(*module_obj));
+}
bool IsCompilationAllowed(i::Isolate* isolate, ErrorThrower* thrower,
v8::Local<v8::Value> source, bool is_async) {
@@ -142,99 +172,12 @@ i::MaybeHandle<i::JSReceiver> GetSecondArgumentAsImports(
return i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
}
-RawBuffer GetRawBufferSource(
- v8::Local<v8::Value> source, ErrorThrower* thrower) {
- const byte* start = nullptr;
- const byte* end = nullptr;
-
- if (source->IsArrayBuffer()) {
- // A raw array buffer was passed.
- Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(source);
- ArrayBuffer::Contents contents = buffer->GetContents();
-
- start = reinterpret_cast<const byte*>(contents.Data());
- end = start + contents.ByteLength();
-
- } else if (source->IsTypedArray()) {
- // A TypedArray was passed.
- Local<TypedArray> array = Local<TypedArray>::Cast(source);
- Local<ArrayBuffer> buffer = array->Buffer();
-
- ArrayBuffer::Contents contents = buffer->GetContents();
-
- start =
- reinterpret_cast<const byte*>(contents.Data()) + array->ByteOffset();
- end = start + array->ByteLength();
-
- } else {
- thrower->TypeError("Argument 0 must be a buffer source");
- }
- if (start == nullptr || end == start) {
- thrower->CompileError("BufferSource argument is empty");
- }
- return {start, end};
-}
-
-static i::MaybeHandle<i::WasmModuleObject> CreateModuleObject(
- v8::Isolate* isolate, const v8::Local<v8::Value> source,
- ErrorThrower* thrower) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::MaybeHandle<i::JSObject> nothing;
-
- RawBuffer buffer = GetRawBufferSource(source, thrower);
- if (buffer.start == nullptr) return i::MaybeHandle<i::WasmModuleObject>();
-
- DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
- return i::wasm::CreateModuleObjectFromBytes(
- i_isolate, buffer.start, buffer.end, thrower, i::wasm::kWasmOrigin,
- i::Handle<i::Script>::null(), i::Vector<const byte>::empty());
-}
-
-static bool ValidateModule(v8::Isolate* isolate,
- const v8::Local<v8::Value> source,
- ErrorThrower* thrower) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::MaybeHandle<i::JSObject> nothing;
-
- RawBuffer buffer = GetRawBufferSource(source, thrower);
- if (buffer.start == nullptr) return false;
-
- DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
- return i::wasm::ValidateModuleBytes(i_isolate, buffer.start, buffer.end,
- thrower,
- i::wasm::ModuleOrigin::kWasmOrigin);
-}
-
-// TODO(wasm): move brand check to the respective types, and don't throw
-// in it, rather, use a provided ErrorThrower, or let caller handle it.
-static bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
- i::Handle<i::Symbol> sym) {
- if (!value->IsJSObject()) return false;
- i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
- Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
- if (has_brand.IsNothing()) return false;
- return has_brand.ToChecked();
-}
-
-static bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
- i::Handle<i::Symbol> sym, const char* msg) {
- if (value->IsJSObject()) {
- i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
- Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
- if (has_brand.IsNothing()) return false;
- if (has_brand.ToChecked()) return true;
- }
- v8::Local<v8::Value> e = v8::Exception::TypeError(v8_str(isolate, msg));
- isolate->ThrowException(e);
- return false;
-}
-
+// WebAssembly.compile(bytes) -> Promise
void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
- "WebAssembly.compile()");
+ ErrorThrower thrower(i_isolate, "WebAssembly.compile()");
Local<Context> context = isolate->GetCurrentContext();
v8::Local<v8::Promise::Resolver> resolver;
@@ -242,40 +185,29 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(resolver->GetPromise());
- if (args.Length() < 1) {
- thrower.TypeError("Argument 0 must be a buffer source");
- resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- return;
- }
auto bytes = GetFirstArgumentAsBytes(args, &thrower);
- USE(bytes);
if (!IsCompilationAllowed(i_isolate, &thrower, args[0], true)) {
resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
return;
}
- i::MaybeHandle<i::JSObject> module_obj =
- CreateModuleObject(isolate, args[0], &thrower);
-
- if (thrower.error()) {
- resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- } else {
- resolver->Resolve(context, Utils::ToLocal(module_obj.ToHandleChecked()));
- }
+ DCHECK(!thrower.error());
+ i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
+ i::wasm::AsyncCompile(i_isolate, promise, bytes);
}
+// WebAssembly.validate(bytes) -> bool
void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
- "WebAssembly.validate()");
+ ErrorThrower thrower(i_isolate, "WebAssembly.validate()");
- if (args.Length() < 1) {
- thrower.TypeError("Argument 0 must be a buffer source");
- return;
- }
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- if (ValidateModule(isolate, args[0], &thrower)) {
+ if (!thrower.error() &&
+ i::wasm::SyncValidate(reinterpret_cast<i::Isolate*>(isolate), &thrower,
+ bytes)) {
return_value.Set(v8::True(isolate));
} else {
if (thrower.wasm_error()) thrower.Reify(); // Clear error.
@@ -283,128 +215,61 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
+// new WebAssembly.Module(bytes) -> WebAssembly.Module
void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
- "WebAssembly.Module()");
+ ErrorThrower thrower(i_isolate, "WebAssembly.Module()");
- if (args.Length() < 1) {
- thrower.TypeError("Argument 0 must be a buffer source");
- return;
- }
auto bytes = GetFirstArgumentAsBytes(args, &thrower);
- USE(bytes);
if (!IsCompilationAllowed(i_isolate, &thrower, args[0], false)) return;
- i::MaybeHandle<i::JSObject> module_obj =
- CreateModuleObject(isolate, args[0], &thrower);
+ DCHECK(!thrower.error());
+ i::MaybeHandle<i::Object> module_obj =
+ i::wasm::SyncCompile(i_isolate, &thrower, bytes);
if (module_obj.is_null()) return;
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(module_obj.ToHandleChecked()));
}
-MaybeLocal<Value> InstantiateModuleImpl(
- i::Isolate* i_isolate, i::Handle<i::WasmModuleObject> i_module_obj,
- const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
- // It so happens that in both the WebAssembly.instantiate, as well as
- // WebAssembly.Instance ctor, the positions of the ffi object and memory
- // are the same. If that changes later, we refactor the consts into
- // parameters.
- static const int kFfiOffset = 1;
-
- MaybeLocal<Value> nothing;
- i::Handle<i::JSReceiver> ffi = i::Handle<i::JSObject>::null();
- // This is a first - level validation of the argument. If present, we only
- // check its type. {Instantiate} will further check that if the module
- // has imports, the argument must be present, as well as piecemeal
- // import satisfaction.
- if (args.Length() > kFfiOffset && !args[kFfiOffset]->IsUndefined()) {
- if (!args[kFfiOffset]->IsObject()) {
- thrower->TypeError("Argument %d must be an object", kFfiOffset);
- return nothing;
- }
- Local<Object> obj = Local<Object>::Cast(args[kFfiOffset]);
- ffi = i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
- }
-
- i::MaybeHandle<i::JSObject> instance =
- i::wasm::WasmModule::Instantiate(i_isolate, thrower, i_module_obj, ffi);
- if (instance.is_null()) {
- if (!thrower->error())
- thrower->RuntimeError("Could not instantiate module");
- return nothing;
- }
- DCHECK(!i_isolate->has_pending_exception());
- return Utils::ToLocal(instance.ToHandleChecked());
-}
-
-namespace {
-i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
- const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower& thrower) {
- v8::Isolate* isolate = args.GetIsolate();
- i::MaybeHandle<i::WasmModuleObject> nothing;
- if (args.Length() < 1) {
- thrower.TypeError("Argument 0 must be a WebAssembly.Module");
- return nothing;
- }
-
- Local<Context> context = isolate->GetCurrentContext();
- i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(isolate, Utils::OpenHandle(*args[0]),
- i::Handle<i::Symbol>(i_context->wasm_module_sym()),
- "Argument 0 must be a WebAssembly.Module")) {
- return nothing;
- }
-
- Local<Object> module_obj = Local<Object>::Cast(args[0]);
- return i::Handle<i::WasmModuleObject>::cast(
- v8::Utils::OpenHandle(*module_obj));
-}
-} // namespace
-
+// WebAssembly.Module.imports(module) -> Array<Import>
void WebAssemblyModuleImports(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ErrorThrower thrower(i_isolate, "WebAssembly.Module.imports()");
- auto maybe_module = GetFirstArgumentAsModule(args, thrower);
-
- if (!maybe_module.is_null()) {
- auto imports =
- i::wasm::GetImports(i_isolate, maybe_module.ToHandleChecked());
- args.GetReturnValue().Set(Utils::ToLocal(imports));
- }
+ auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
+ if (thrower.error()) return;
+ auto imports = i::wasm::GetImports(i_isolate, maybe_module.ToHandleChecked());
+ args.GetReturnValue().Set(Utils::ToLocal(imports));
}
+// WebAssembly.Module.exports(module) -> Array<Export>
void WebAssemblyModuleExports(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
ErrorThrower thrower(i_isolate, "WebAssembly.Module.exports()");
- auto maybe_module = GetFirstArgumentAsModule(args, thrower);
-
- if (!maybe_module.is_null()) {
- auto exports =
- i::wasm::GetExports(i_isolate, maybe_module.ToHandleChecked());
- args.GetReturnValue().Set(Utils::ToLocal(exports));
- }
+ auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
+ if (thrower.error()) return;
+ auto exports = i::wasm::GetExports(i_isolate, maybe_module.ToHandleChecked());
+ args.GetReturnValue().Set(Utils::ToLocal(exports));
}
+// WebAssembly.Module.customSections(module, name) -> Array<Section>
void WebAssemblyModuleCustomSections(
const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
ErrorThrower thrower(i_isolate, "WebAssembly.Module.customSections()");
- auto maybe_module = GetFirstArgumentAsModule(args, thrower);
+ auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
+ if (thrower.error()) return;
if (args.Length() < 2) {
thrower.TypeError("Argument 1 must be a string");
@@ -417,25 +282,23 @@ void WebAssemblyModuleCustomSections(
return;
}
- if (!maybe_module.is_null()) {
- auto custom_sections =
- i::wasm::GetCustomSections(i_isolate, maybe_module.ToHandleChecked(),
- i::Handle<i::String>::cast(name), &thrower);
- if (!thrower.error()) {
- args.GetReturnValue().Set(Utils::ToLocal(custom_sections));
- }
- }
+ auto custom_sections =
+ i::wasm::GetCustomSections(i_isolate, maybe_module.ToHandleChecked(),
+ i::Handle<i::String>::cast(name), &thrower);
+ if (thrower.error()) return;
+ args.GetReturnValue().Set(Utils::ToLocal(custom_sections));
}
+// new WebAssembly.Instance(module, imports) -> WebAssembly.Instance
void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
ErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
- auto maybe_module = GetFirstArgumentAsModule(args, thrower);
+ auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
+
auto maybe_imports = GetSecondArgumentAsImports(args, &thrower);
if (!IsInstantiationAllowed(i_isolate, &thrower, args[0], maybe_imports,
false)) {
@@ -443,23 +306,22 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
DCHECK(!thrower.error());
- if (!maybe_module.is_null()) {
- MaybeLocal<Value> instance = InstantiateModuleImpl(
- i_isolate, maybe_module.ToHandleChecked(), args, &thrower);
- if (instance.IsEmpty()) {
- DCHECK(thrower.error());
- return;
- }
- args.GetReturnValue().Set(instance.ToLocalChecked());
- }
+ i::MaybeHandle<i::Object> instance_object = i::wasm::SyncInstantiate(
+ i_isolate, &thrower, maybe_module.ToHandleChecked(), maybe_imports,
+ i::MaybeHandle<i::JSArrayBuffer>());
+ if (instance_object.is_null()) return;
+ args.GetReturnValue().Set(Utils::ToLocal(instance_object.ToHandleChecked()));
}
+// WebAssembly.instantiate(module, imports) -> WebAssembly.Instance
+// WebAssembly.instantiate(bytes, imports) ->
+// {module: WebAssembly.Module, instance: WebAssembly.Instance}
void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
Local<Context> context = isolate->GetCurrentContext();
i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
@@ -485,8 +347,6 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- bool want_pair = !BrandCheck(
- isolate, first_arg, i::Handle<i::Symbol>(i_context->wasm_module_sym()));
auto maybe_imports = GetSecondArgumentAsImports(args, &thrower);
if (thrower.error()) {
resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
@@ -497,48 +357,21 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
return;
}
- i::Handle<i::WasmModuleObject> module_obj;
- if (want_pair) {
- i::MaybeHandle<i::WasmModuleObject> maybe_module_obj =
- CreateModuleObject(isolate, args[0], &thrower);
- if (!maybe_module_obj.ToHandle(&module_obj)) {
- DCHECK(thrower.error());
+ i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
+ if (HasBrand(first_arg, i::Handle<i::Symbol>(i_context->wasm_module_sym()))) {
+ // WebAssembly.instantiate(module, imports) -> WebAssembly.Instance
+ auto module_object = GetFirstArgumentAsModule(args, &thrower);
+ i::wasm::AsyncInstantiate(i_isolate, promise,
+ module_object.ToHandleChecked(), maybe_imports);
+ } else {
+ // WebAssembly.instantiate(bytes, imports) -> {module, instance}
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+ if (thrower.error()) {
resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
return;
}
- } else {
- module_obj = i::Handle<i::WasmModuleObject>::cast(first_arg);
- }
- DCHECK(!module_obj.is_null());
- MaybeLocal<Value> instance =
- InstantiateModuleImpl(i_isolate, module_obj, args, &thrower);
- if (instance.IsEmpty()) {
- DCHECK(thrower.error());
- resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- } else {
- DCHECK(!thrower.error());
- Local<Value> retval;
- if (want_pair) {
- i::Handle<i::JSFunction> object_function = i::Handle<i::JSFunction>(
- i_isolate->native_context()->object_function(), i_isolate);
-
- i::Handle<i::JSObject> i_retval =
- i_isolate->factory()->NewJSObject(object_function, i::TENURED);
- i::Handle<i::String> module_property_name =
- i_isolate->factory()->InternalizeUtf8String("module");
- i::Handle<i::String> instance_property_name =
- i_isolate->factory()->InternalizeUtf8String("instance");
- i::JSObject::AddProperty(i_retval, module_property_name, module_obj,
- i::NONE);
- i::JSObject::AddProperty(i_retval, instance_property_name,
- Utils::OpenHandle(*instance.ToLocalChecked()),
- i::NONE);
- retval = Utils::ToLocal(i_retval);
- } else {
- retval = instance.ToLocalChecked();
- }
- DCHECK(!retval.IsEmpty());
- resolver->Resolve(context, retval);
+ i::wasm::AsyncCompileAndInstantiate(i_isolate, promise, bytes,
+ maybe_imports);
}
}
@@ -569,11 +402,12 @@ bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
return false;
}
+// new WebAssembly.Table(args) -> WebAssembly.Table
void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
- "WebAssembly.Module()");
+ ErrorThrower thrower(i_isolate, "WebAssembly.Module()");
if (args.Length() < 1 || !args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a table descriptor");
return;
@@ -599,7 +433,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
int initial = 0;
if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
v8_str(isolate, "initial"), &initial, 0,
- i::wasm::kV8MaxWasmTableSize)) {
+ i::FLAG_wasm_max_table_size)) {
return;
}
// The descriptor's 'maximum'.
@@ -615,7 +449,6 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::FixedArray> fixed_array;
i::Handle<i::JSObject> table_obj =
i::WasmTableObject::New(i_isolate, initial, maximum, &fixed_array);
@@ -625,9 +458,9 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
- "WebAssembly.Memory()");
+ ErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
if (args.Length() < 1 || !args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a memory descriptor");
return;
@@ -638,7 +471,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
int initial = 0;
if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
v8_str(isolate, "initial"), &initial, 0,
- i::wasm::kV8MaxWasmMemoryPages)) {
+ i::FLAG_wasm_max_mem_pages)) {
return;
}
// The descriptor's 'maximum'.
@@ -653,7 +486,6 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
}
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
static_cast<size_t>(initial);
i::Handle<i::JSArrayBuffer> buffer =
@@ -670,10 +502,13 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void WebAssemblyTableGetLength(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ErrorThrower thrower(i_isolate, "WebAssembly.Table.length()");
Local<Context> context = isolate->GetCurrentContext();
i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+ if (!BrandCheck(Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
"Receiver is not a WebAssembly.Table")) {
return;
}
@@ -683,17 +518,20 @@ void WebAssemblyTableGetLength(
v8::Number::New(isolate, receiver->current_length()));
}
+// WebAssembly.Table.grow(num) -> num
void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ErrorThrower thrower(i_isolate, "WebAssembly.Table.grow()");
Local<Context> context = isolate->GetCurrentContext();
i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+ if (!BrandCheck(Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
"Receiver is not a WebAssembly.Table")) {
return;
}
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
auto receiver =
i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
i::Handle<i::FixedArray> old_array(receiver->functions(), i_isolate);
@@ -706,15 +544,13 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
int64_t max_size64 = receiver->maximum_length();
if (max_size64 < 0 ||
- max_size64 > static_cast<int64_t>(i::wasm::kV8MaxWasmTableSize)) {
- max_size64 = i::wasm::kV8MaxWasmTableSize;
+ max_size64 > static_cast<int64_t>(i::FLAG_wasm_max_table_size)) {
+ max_size64 = i::FLAG_wasm_max_table_size;
}
if (new_size64 < old_size || new_size64 > max_size64) {
- v8::Local<v8::Value> e = v8::Exception::RangeError(
- v8_str(isolate, new_size64 < old_size ? "trying to shrink table"
- : "maximum table size exceeded"));
- isolate->ThrowException(e);
+ thrower.RangeError(new_size64 < old_size ? "trying to shrink table"
+ : "maximum table size exceeded");
return;
}
@@ -736,17 +572,20 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return_value.Set(old_size);
}
+// WebAssembly.Table.get(num) -> JSFunction
void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
Local<Context> context = isolate->GetCurrentContext();
i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+ if (!BrandCheck(Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
"Receiver is not a WebAssembly.Table")) {
return;
}
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
auto receiver =
i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
@@ -754,9 +593,7 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() > 0 && !args[0]->Int32Value(context).To(&i)) return;
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
if (i < 0 || i >= array->length()) {
- v8::Local<v8::Value> e =
- v8::Exception::RangeError(v8_str(isolate, "index out of bounds"));
- isolate->ThrowException(e);
+ thrower.RangeError("index out of bounds");
return;
}
@@ -764,20 +601,21 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return_value.Set(Utils::ToLocal(value));
}
+// WebAssembly.Table.set(num, JSFunction)
void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
Local<Context> context = isolate->GetCurrentContext();
i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+ if (!BrandCheck(Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
"Receiver is not a WebAssembly.Table")) {
return;
}
if (args.Length() < 2) {
- v8::Local<v8::Value> e = v8::Exception::TypeError(
- v8_str(isolate, "Argument 1 must be null or a function"));
- isolate->ThrowException(e);
+ thrower.TypeError("Argument 1 must be null or a function");
return;
}
i::Handle<i::Object> value = Utils::OpenHandle(*args[1]);
@@ -785,9 +623,7 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
(!value->IsJSFunction() ||
i::Handle<i::JSFunction>::cast(value)->code()->kind() !=
i::Code::JS_TO_WASM_FUNCTION)) {
- v8::Local<v8::Value> e = v8::Exception::TypeError(
- v8_str(isolate, "Argument 1 must be null or a WebAssembly function"));
- isolate->ThrowException(e);
+ thrower.TypeError("Argument 1 must be null or a WebAssembly function");
return;
}
@@ -797,9 +633,7 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
int i;
if (!args[0]->Int32Value(context).To(&i)) return;
if (i < 0 || i >= array->length()) {
- v8::Local<v8::Value> e =
- v8::Exception::RangeError(v8_str(isolate, "index out of bounds"));
- isolate->ThrowException(e);
+ thrower.RangeError("index out of bounds");
return;
}
@@ -816,64 +650,65 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::FixedArray>::cast(array)->set(i, *value);
}
+// WebAssembly.Memory.grow(num) -> num
void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ErrorThrower thrower(i_isolate, "WebAssembly.Memory.grow()");
Local<Context> context = isolate->GetCurrentContext();
i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_memory_sym()),
+ if (!BrandCheck(Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_memory_sym()), &thrower,
"Receiver is not a WebAssembly.Memory")) {
return;
}
int64_t delta_size = 0;
if (args.Length() < 1 || !args[0]->IntegerValue(context).To(&delta_size)) {
- v8::Local<v8::Value> e = v8::Exception::TypeError(
- v8_str(isolate, "Argument 0 required, must be numeric value of pages"));
- isolate->ThrowException(e);
+ thrower.TypeError("Argument 0 required, must be numeric value of pages");
return;
}
i::Handle<i::WasmMemoryObject> receiver =
i::Handle<i::WasmMemoryObject>::cast(Utils::OpenHandle(*args.This()));
int64_t max_size64 = receiver->maximum_pages();
if (max_size64 < 0 ||
- max_size64 > static_cast<int64_t>(i::wasm::kV8MaxWasmTableSize)) {
- max_size64 = i::wasm::kV8MaxWasmMemoryPages;
+ max_size64 > static_cast<int64_t>(i::FLAG_wasm_max_mem_pages)) {
+ max_size64 = i::FLAG_wasm_max_mem_pages;
}
i::Handle<i::JSArrayBuffer> old_buffer(receiver->buffer());
uint32_t old_size =
old_buffer->byte_length()->Number() / i::wasm::WasmModule::kPageSize;
int64_t new_size64 = old_size + delta_size;
if (delta_size < 0 || max_size64 < new_size64 || new_size64 < old_size) {
- v8::Local<v8::Value> e = v8::Exception::RangeError(v8_str(
- isolate, new_size64 < old_size ? "trying to shrink memory"
- : "maximum memory size exceeded"));
- isolate->ThrowException(e);
+ thrower.RangeError(new_size64 < old_size ? "trying to shrink memory"
+ : "maximum memory size exceeded");
return;
}
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
int32_t ret = i::wasm::GrowWebAssemblyMemory(
i_isolate, receiver, static_cast<uint32_t>(delta_size));
if (ret == -1) {
- v8::Local<v8::Value> e = v8::Exception::RangeError(
- v8_str(isolate, "Unable to grow instance memory."));
- isolate->ThrowException(e);
+ thrower.RangeError("Unable to grow instance memory.");
return;
}
+ i::wasm::DetachWebAssemblyMemoryBuffer(i_isolate, old_buffer);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(ret);
}
+// WebAssembly.Memory.buffer -> ArrayBuffer
void WebAssemblyMemoryGetBuffer(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ErrorThrower thrower(i_isolate, "WebAssembly.Memory.buffer");
Local<Context> context = isolate->GetCurrentContext();
i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_memory_sym()),
+ if (!BrandCheck(Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_memory_sym()), &thrower,
"Receiver is not a WebAssembly.Memory")) {
return;
}
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::WasmMemoryObject> receiver =
i::Handle<i::WasmMemoryObject>::cast(Utils::OpenHandle(*args.This()));
i::Handle<i::Object> buffer(receiver->buffer(), i_isolate);
@@ -1078,16 +913,6 @@ void WasmJs::Install(Isolate* isolate) {
runtime_error, attributes);
}
-static bool HasBrand(i::Handle<i::Object> value, i::Handle<i::Symbol> symbol) {
- if (value->IsJSObject()) {
- i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
- Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, symbol);
- if (has_brand.IsNothing()) return false;
- if (has_brand.ToChecked()) return true;
- }
- return false;
-}
-
bool WasmJs::IsWasmMemoryObject(Isolate* isolate, Handle<Object> value) {
i::Handle<i::Symbol> symbol(isolate->context()->wasm_memory_sym(), isolate);
return HasBrand(value, symbol);
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index 4c7455adc5..bf657a8c5b 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -17,6 +17,7 @@ const size_t kV8MaxWasmImports = 100000;
const size_t kV8MaxWasmExports = 100000;
const size_t kV8MaxWasmGlobals = 1000000;
const size_t kV8MaxWasmDataSegments = 100000;
+// Don't use this limit directly, but use the value of FLAG_wasm_max_mem_pages.
const size_t kV8MaxWasmMemoryPages = 16384; // = 1 GiB
const size_t kV8MaxWasmStringSize = 100000;
const size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
@@ -25,6 +26,7 @@ const size_t kV8MaxWasmFunctionLocals = 50000;
const size_t kV8MaxWasmFunctionParams = 1000;
const size_t kV8MaxWasmFunctionMultiReturns = 1000;
const size_t kV8MaxWasmFunctionReturns = 1;
+// Don't use this limit directly, but use the value of FLAG_wasm_max_table_size.
const size_t kV8MaxWasmTableSize = 10000000;
const size_t kV8MaxWasmTableEntries = 10000000;
const size_t kV8MaxWasmTables = 1;
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/src/wasm/wasm-macro-gen.h
index 1ec9ee80ff..931ad92373 100644
--- a/deps/v8/src/wasm/wasm-macro-gen.h
+++ b/deps/v8/src/wasm/wasm-macro-gen.h
@@ -372,15 +372,16 @@ class LocalDeclEncoder {
static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 8), \
static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 16), \
static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 24)
-#define WASM_F64(val) \
- kExprF64Const, static_cast<byte>(bit_cast<uint64_t>(val)), \
- static_cast<byte>(bit_cast<uint64_t>(val) >> 8), \
- static_cast<byte>(bit_cast<uint64_t>(val) >> 16), \
- static_cast<byte>(bit_cast<uint64_t>(val) >> 24), \
- static_cast<byte>(bit_cast<uint64_t>(val) >> 32), \
- static_cast<byte>(bit_cast<uint64_t>(val) >> 40), \
- static_cast<byte>(bit_cast<uint64_t>(val) >> 48), \
- static_cast<byte>(bit_cast<uint64_t>(val) >> 56)
+#define WASM_F64(val) \
+ kExprF64Const, \
+ static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val))), \
+ static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 8), \
+ static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 16), \
+ static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 24), \
+ static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 32), \
+ static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 40), \
+ static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 48), \
+ static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 56)
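The added static_cast<double> is what makes integral arguments compile, since bit_cast requires source and destination types of identical size; for example (hypothetical usage):

byte code[] = {WASM_F64(1)};
// ok: 1 is widened to 1.0 before bit_cast; emits kExprF64Const followed
// by the little-endian bytes 00 00 00 00 00 00 f0 3f.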
#define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
#define WASM_SET_LOCAL(index, val) val, kExprSetLocal, static_cast<byte>(index)
#define WASM_TEE_LOCAL(index, val) val, kExprTeeLocal, static_cast<byte>(index)
@@ -460,9 +461,6 @@ class LocalDeclEncoder {
static_cast<byte>(index)
#define WASM_UNOP(opcode, x) x, static_cast<byte>(opcode)
#define WASM_BINOP(opcode, x, y) x, y, static_cast<byte>(opcode)
-#define WASM_SIMD_UNOP(opcode, x) x, kSimdPrefix, static_cast<byte>(opcode)
-#define WASM_SIMD_BINOP(opcode, x, y) \
- x, y, kSimdPrefix, static_cast<byte>(opcode)
//------------------------------------------------------------------------------
// Int32 operations
@@ -624,31 +622,32 @@ class LocalDeclEncoder {
//------------------------------------------------------------------------------
// Simd Operations.
//------------------------------------------------------------------------------
-#define WASM_SIMD_F32x4_SPLAT(x) x, kSimdPrefix, kExprF32x4Splat & 0xff
-#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
- x, kSimdPrefix, kExprF32x4ExtractLane & 0xff, static_cast<byte>(lane)
-#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
- x, y, kSimdPrefix, kExprF32x4ReplaceLane & 0xff, static_cast<byte>(lane)
-#define WASM_SIMD_F32x4_FROM_I32x4(x) \
- x, kSimdPrefix, kExprF32x4FromInt32x4 & 0xff
-#define WASM_SIMD_F32x4_FROM_U32x4(x) \
- x, kSimdPrefix, kExprF32x4FromUint32x4 & 0xff
-#define WASM_SIMD_F32x4_ADD(x, y) x, y, kSimdPrefix, kExprF32x4Add & 0xff
-#define WASM_SIMD_F32x4_SUB(x, y) x, y, kSimdPrefix, kExprF32x4Sub & 0xff
-
-#define WASM_SIMD_I32x4_SPLAT(x) x, kSimdPrefix, kExprI32x4Splat & 0xff
-#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
- x, kSimdPrefix, kExprI32x4ExtractLane & 0xff, static_cast<byte>(lane)
-#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
- x, y, kSimdPrefix, kExprI32x4ReplaceLane & 0xff, static_cast<byte>(lane)
-#define WASM_SIMD_I32x4_FROM_F32x4(x) \
- x, kSimdPrefix, kExprI32x4FromFloat32x4 & 0xff
-#define WASM_SIMD_U32x4_FROM_F32x4(x) \
- x, kSimdPrefix, kExprUi32x4FromFloat32x4 & 0xff
-#define WASM_SIMD_S32x4_SELECT(x, y, z) \
- x, y, z, kSimdPrefix, kExprS32x4Select & 0xff
-#define WASM_SIMD_I32x4_ADD(x, y) x, y, kSimdPrefix, kExprI32x4Add & 0xff
-#define WASM_SIMD_I32x4_SUB(x, y) x, y, kSimdPrefix, kExprI32x4Sub & 0xff
+// TODO(bbudge) Migrate these into tests.
+#define WASM_SIMD_F32x4_SPLAT(x) \
+ x, kSimdPrefix, static_cast<byte>(kExprF32x4Splat)
+#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
+ x, kSimdPrefix, static_cast<byte>(kExprF32x4ExtractLane), \
+ static_cast<byte>(lane)
+#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
+ x, y, kSimdPrefix, static_cast<byte>(kExprF32x4ReplaceLane), \
+ static_cast<byte>(lane)
+#define WASM_SIMD_F32x4_ADD(x, y) \
+ x, y, kSimdPrefix, static_cast<byte>(kExprF32x4Add)
+#define WASM_SIMD_F32x4_SUB(x, y) \
+ x, y, kSimdPrefix, static_cast<byte>(kExprF32x4Sub)
+
+#define WASM_SIMD_I32x4_SPLAT(x) \
+ x, kSimdPrefix, static_cast<byte>(kExprI32x4Splat)
+#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
+ x, kSimdPrefix, static_cast<byte>(kExprI32x4ExtractLane), \
+ static_cast<byte>(lane)
+#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
+ x, y, kSimdPrefix, static_cast<byte>(kExprI32x4ReplaceLane), \
+ static_cast<byte>(lane)
+#define WASM_SIMD_I32x4_ADD(x, y) \
+ x, y, kSimdPrefix, static_cast<byte>(kExprI32x4Add)
+#define WASM_SIMD_I32x4_SUB(x, y) \
+ x, y, kSimdPrefix, static_cast<byte>(kExprI32x4Sub)
#define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
#define SIZEOF_SIG_ENTRY_v_v 3
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index cd83d46d3e..a9c724a78d 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -5,11 +5,13 @@
#include "src/signature.h"
#include "src/handles.h"
+#include "src/objects-inl.h"
#include "src/v8.h"
#include "src/zone/zone-containers.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/leb-helper.h"
+#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
@@ -61,13 +63,20 @@ WasmFunctionBuilder::WasmFunctionBuilder(WasmModuleBuilder* builder)
direct_calls_(builder->zone()),
asm_offsets_(builder->zone(), 8) {}
-void WasmFunctionBuilder::EmitVarInt(uint32_t val) {
- byte buffer[8];
+void WasmFunctionBuilder::EmitVarInt(int32_t val) {
+ byte buffer[5];
+ byte* ptr = buffer;
+ LEBHelper::write_i32v(&ptr, val);
+ DCHECK_GE(5, ptr - buffer);
+ body_.insert(body_.end(), buffer, ptr);
+}
+
+void WasmFunctionBuilder::EmitVarUint(uint32_t val) {
+ byte buffer[5];
byte* ptr = buffer;
LEBHelper::write_u32v(&ptr, val);
- for (byte* p = buffer; p < ptr; p++) {
- body_.push_back(*p);
- }
+ DCHECK_GE(5, ptr - buffer);
+ body_.insert(body_.end(), buffer, ptr);
}
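The split matters because the two LEB128 flavors terminate differently; a minimal standalone sketch of both (illustration only, not the LEBHelper implementation; assumes arithmetic right shift for the signed case, as on all V8 targets):

#include <cstdint>
#include <vector>

// Unsigned LEB128: stop when no nonzero bits remain.
void WriteU32LEB(std::vector<uint8_t>* out, uint32_t val) {
  do {
    uint8_t b = val & 0x7f;
    val >>= 7;
    if (val != 0) b |= 0x80;  // set the continuation bit
    out->push_back(b);
  } while (val != 0);
}

// Signed LEB128: stop once the remaining bits are pure sign extension and
// the sign bit of the byte just produced already agrees with them.
void WriteI32LEB(std::vector<uint8_t>* out, int32_t val) {
  while (true) {
    uint8_t b = val & 0x7f;
    val >>= 7;  // arithmetic shift: sign bits fill in from the left
    if ((val == 0 && !(b & 0x40)) || (val == -1 && (b & 0x40))) {
      out->push_back(b);
      return;
    }
    out->push_back(b | 0x80);
  }
}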
void WasmFunctionBuilder::SetSignature(FunctionSig* sig) {
@@ -82,15 +91,15 @@ uint32_t WasmFunctionBuilder::AddLocal(ValueType type) {
}
void WasmFunctionBuilder::EmitGetLocal(uint32_t local_index) {
- EmitWithVarInt(kExprGetLocal, local_index);
+ EmitWithVarUint(kExprGetLocal, local_index);
}
void WasmFunctionBuilder::EmitSetLocal(uint32_t local_index) {
- EmitWithVarInt(kExprSetLocal, local_index);
+ EmitWithVarUint(kExprSetLocal, local_index);
}
void WasmFunctionBuilder::EmitTeeLocal(uint32_t local_index) {
- EmitWithVarInt(kExprTeeLocal, local_index);
+ EmitWithVarUint(kExprTeeLocal, local_index);
}
void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
@@ -115,20 +124,19 @@ void WasmFunctionBuilder::EmitWithU8U8(WasmOpcode opcode, const byte imm1,
body_.push_back(imm2);
}
-void WasmFunctionBuilder::EmitWithVarInt(WasmOpcode opcode,
- uint32_t immediate) {
+void WasmFunctionBuilder::EmitWithVarInt(WasmOpcode opcode, int32_t immediate) {
body_.push_back(static_cast<byte>(opcode));
EmitVarInt(immediate);
}
+void WasmFunctionBuilder::EmitWithVarUint(WasmOpcode opcode,
+ uint32_t immediate) {
+ body_.push_back(static_cast<byte>(opcode));
+ EmitVarUint(immediate);
+}
+
void WasmFunctionBuilder::EmitI32Const(int32_t value) {
- if (-64 <= value && value <= 63) {
- EmitWithU8(kExprI32Const, static_cast<byte>(value & 0x7F));
- } else {
- // TODO(titzer): variable-length signed and unsigned i32 constants.
- byte code[] = {WASM_I32V_5(value)};
- EmitCode(code, sizeof(code));
- }
+ EmitWithVarInt(kExprI32Const, value);
}
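Assuming the standard 0x41 opcode byte for i32.const, the signed encoding now yields, for example:

// EmitI32Const(63)  -> 41 3f       (one data byte, as before)
// EmitI32Const(-1)  -> 41 7f       (one data byte, as before)
// EmitI32Const(64)  -> 41 c0 00    (two data bytes; the old path padded this to five)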
void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 3258f78d50..c6903cd953 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -121,7 +121,8 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
// Building methods.
void SetSignature(FunctionSig* sig);
uint32_t AddLocal(ValueType type);
- void EmitVarInt(uint32_t val);
+ void EmitVarInt(int32_t val);
+ void EmitVarUint(uint32_t val);
void EmitCode(const byte* code, uint32_t code_size);
void Emit(WasmOpcode opcode);
void EmitGetLocal(uint32_t index);
@@ -130,7 +131,8 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
void EmitI32Const(int32_t val);
void EmitWithU8(WasmOpcode opcode, const byte immediate);
void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
- void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
+ void EmitWithVarInt(WasmOpcode opcode, int32_t immediate);
+ void EmitWithVarUint(WasmOpcode opcode, uint32_t immediate);
void EmitDirectCallIndex(uint32_t index);
void ExportAs(Vector<const char> name);
void SetName(Vector<const char> name);
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 60dda925fa..9df236fa9e 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -16,8 +16,10 @@
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
+#include "src/asmjs/asm-wasm-builder.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -46,27 +48,20 @@ byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
}
-void ReplaceReferenceInCode(Handle<Code> code, Handle<Object> old_ref,
- Handle<Object> new_ref) {
- for (RelocIterator it(*code, 1 << RelocInfo::EMBEDDED_OBJECT); !it.done();
- it.next()) {
- if (it.rinfo()->target_object() == *old_ref) {
- it.rinfo()->set_target_object(*new_ref);
- }
- }
-}
-
static void MemoryFinalizer(const v8::WeakCallbackInfo<void>& data) {
DisallowHeapAllocation no_gc;
JSArrayBuffer** p = reinterpret_cast<JSArrayBuffer**>(data.GetParameter());
JSArrayBuffer* buffer = *p;
- void* memory = buffer->backing_store();
- base::OS::Free(memory,
- RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize()));
+ if (!buffer->was_neutered()) {
+ void* memory = buffer->backing_store();
+ DCHECK(memory != nullptr);
+ base::OS::Free(memory,
+ RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize()));
- data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
- -buffer->byte_length()->Number());
+ data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
+ -buffer->byte_length()->Number());
+ }
GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
}
@@ -81,6 +76,19 @@ bool EnableGuardRegions() {
return FLAG_wasm_guard_pages && kGuardRegionsSupported;
}
+static void RecordStats(Isolate* isolate, Code* code) {
+ isolate->counters()->wasm_generated_code_size()->Increment(code->body_size());
+ isolate->counters()->wasm_reloc_size()->Increment(
+ code->relocation_info()->length());
+}
+
+static void RecordStats(Isolate* isolate, Handle<FixedArray> functions) {
+ DisallowHeapAllocation no_gc;
+ for (int i = 0; i < functions->length(); ++i) {
+ RecordStats(isolate, Code::cast(functions->get(i)));
+ }
+}
+
void* TryAllocateBackingStore(Isolate* isolate, size_t size,
bool enable_guard_regions, bool& is_external) {
is_external = false;
@@ -118,58 +126,6 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
}
}
-void RelocateMemoryReferencesInCode(Handle<FixedArray> code_table,
- uint32_t num_imported_functions,
- Address old_start, Address start,
- uint32_t prev_size, uint32_t new_size) {
- for (int i = static_cast<int>(num_imported_functions);
- i < code_table->length(); ++i) {
- DCHECK(code_table->get(i)->IsCode());
- Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
- AllowDeferredHandleDereference embedding_raw_address;
- int mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE) |
- (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- it.rinfo()->update_wasm_memory_reference(old_start, start, prev_size,
- new_size);
- }
- }
-}
-
-void RelocateGlobals(Handle<FixedArray> code_table, Address old_start,
- Address globals_start) {
- for (int i = 0; i < code_table->length(); ++i) {
- DCHECK(code_table->get(i)->IsCode());
- Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
- AllowDeferredHandleDereference embedding_raw_address;
- int mask = 1 << RelocInfo::WASM_GLOBAL_REFERENCE;
- for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- it.rinfo()->update_wasm_global_reference(old_start, globals_start);
- }
- }
-}
-
-void RelocateTableSizeReferences(Handle<FixedArray> code_table,
- uint32_t old_size, uint32_t new_size) {
- for (int i = 0; i < code_table->length(); ++i) {
- DCHECK(code_table->get(i)->IsCode());
- Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
- AllowDeferredHandleDereference embedding_raw_address;
- int mask = 1 << RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE;
- for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- it.rinfo()->update_wasm_function_table_size_reference(old_size, new_size);
- }
- }
-}
-
-Handle<Code> CreatePlaceholder(Factory* factory, Code::Kind kind) {
- byte buffer[] = {0, 0, 0, 0}; // fake instructions.
- CodeDesc desc = {
- buffer, arraysize(buffer), arraysize(buffer), 0, 0, nullptr, 0, nullptr};
- return factory->NewCode(desc, Code::KindField::encode(kind),
- Handle<Object>::null());
-}
-
void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
for (int i = 0; i < code_table->length(); ++i) {
Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
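Every removed Relocate* helper followed the same shape, condensed below: walk a Code object's relocation entries under a mode mask and rewrite each match. CodeSpecialization, from the newly included wasm-code-specialization.h, now registers all pending rewrites and applies them in one pass per function; the sketch reuses only calls visible in the deleted code and illustrates the old pattern, not the new API:

// Condensed form of the deleted helpers (memory-reference case shown).
void RelocateMemoryReferences(Handle<FixedArray> code_table, Address old_start,
                              Address new_start, uint32_t old_size,
                              uint32_t new_size) {
  int mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE) |
             (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
  for (int i = 0; i < code_table->length(); ++i) {
    Handle<Code> code(Code::cast(code_table->get(i)));
    AllowDeferredHandleDereference embedding_raw_address;
    for (RelocIterator it(*code, mask); !it.done(); it.next()) {
      it.rinfo()->update_wasm_memory_reference(old_start, new_start, old_size,
                                               new_size);
    }
  }
}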
@@ -178,423 +134,476 @@ void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
}
}
-// Fetches the compilation unit of a wasm function and executes its parallel
-// phase.
-bool FetchAndExecuteCompilationUnit(
- Isolate* isolate,
- std::vector<compiler::WasmCompilationUnit*>* compilation_units,
- std::queue<compiler::WasmCompilationUnit*>* executed_units,
- base::Mutex* result_mutex, base::AtomicNumber<size_t>* next_unit) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
- DisallowCodeDependencyChange no_dependency_change;
-
- // - 1 because AtomicIncrement returns the value after the atomic increment.
- size_t index = next_unit->Increment(1) - 1;
- if (index >= compilation_units->size()) {
- return false;
- }
+Handle<Script> CreateWasmScript(Isolate* isolate,
+ const ModuleWireBytes& wire_bytes) {
+ Handle<Script> script =
+ isolate->factory()->NewScript(isolate->factory()->empty_string());
+ FixedArray* array = isolate->native_context()->embedder_data();
+ script->set_context_data(array->get(v8::Context::kDebugIdIndex));
+ script->set_type(Script::TYPE_WASM);
- compiler::WasmCompilationUnit* unit = compilation_units->at(index);
- if (unit != nullptr) {
- unit->ExecuteCompilation();
- base::LockGuard<base::Mutex> guard(result_mutex);
- executed_units->push(unit);
- }
- return true;
-}
+ int hash = StringHasher::HashSequentialString(
+ reinterpret_cast<const char*>(wire_bytes.start()), wire_bytes.length(),
+ kZeroHashSeed);
-class WasmCompilationTask : public CancelableTask {
- public:
- WasmCompilationTask(
- Isolate* isolate,
- std::vector<compiler::WasmCompilationUnit*>* compilation_units,
- std::queue<compiler::WasmCompilationUnit*>* executed_units,
- base::Semaphore* on_finished, base::Mutex* result_mutex,
- base::AtomicNumber<size_t>* next_unit)
- : CancelableTask(isolate),
- isolate_(isolate),
- compilation_units_(compilation_units),
- executed_units_(executed_units),
- on_finished_(on_finished),
- result_mutex_(result_mutex),
- next_unit_(next_unit) {}
-
- void RunInternal() override {
- while (FetchAndExecuteCompilationUnit(isolate_, compilation_units_,
- executed_units_, result_mutex_,
- next_unit_)) {
- }
- on_finished_->Signal();
- }
+ const int kBufferSize = 32;
+ char buffer[kBufferSize];
+ int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
+ DCHECK(url_chars >= 0 && url_chars < kBufferSize);
+ MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
+ TENURED);
+ script->set_source_url(*url_str.ToHandleChecked());
- Isolate* isolate_;
- std::vector<compiler::WasmCompilationUnit*>* compilation_units_;
- std::queue<compiler::WasmCompilationUnit*>* executed_units_;
- base::Semaphore* on_finished_;
- base::Mutex* result_mutex_;
- base::AtomicNumber<size_t>* next_unit_;
-};
+ int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
+ DCHECK(name_chars >= 0 && name_chars < kBufferSize);
+ MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ TENURED);
+ script->set_name(*name_str.ToHandleChecked());
-static void RecordStats(Isolate* isolate, Code* code) {
- isolate->counters()->wasm_generated_code_size()->Increment(code->body_size());
- isolate->counters()->wasm_reloc_size()->Increment(
- code->relocation_info()->length());
+ return script;
}
-static void RecordStats(Isolate* isolate, Handle<FixedArray> functions) {
- DisallowHeapAllocation no_gc;
- for (int i = 0; i < functions->length(); ++i) {
- RecordStats(isolate, Code::cast(functions->get(i)));
- }
-}
+class JSToWasmWrapperCache {
+ public:
+ Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
+ const wasm::WasmModule* module,
+ Handle<Code> wasm_code,
+ uint32_t index) {
+ const wasm::WasmFunction* func = &module->functions[index];
+ int cached_idx = sig_map_.Find(func->sig);
+ if (cached_idx >= 0) {
+ Handle<Code> code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
+ // Now patch the call to wasm code.
+ for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
+ DCHECK(!it.done());
+ Code* target =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() == Code::WASM_FUNCTION ||
+ target->kind() == Code::WASM_TO_JS_FUNCTION ||
+ target->builtin_index() == Builtins::kIllegal) {
+ it.rinfo()->set_target_address(wasm_code->instruction_start());
+ break;
+ }
+ }
+ return code;
+ }
-Address GetGlobalStartAddressFromCodeTemplate(Object* undefined,
- JSObject* object) {
- auto instance = WasmInstanceObject::cast(object);
- Address old_address = nullptr;
- if (instance->has_globals_buffer()) {
- old_address =
- static_cast<Address>(instance->globals_buffer()->backing_store());
+ Handle<Code> code =
+ compiler::CompileJSToWasmWrapper(isolate, module, wasm_code, index);
+ uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
+ DCHECK_EQ(code_cache_.size(), new_cache_idx);
+ USE(new_cache_idx);
+ code_cache_.push_back(code);
+ return code;
}
- return old_address;
-}
-void InitializeParallelCompilation(
- Isolate* isolate, const std::vector<WasmFunction>& functions,
- std::vector<compiler::WasmCompilationUnit*>& compilation_units,
- ModuleBytesEnv& module_env, ErrorThrower* thrower) {
- for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size(); ++i) {
- const WasmFunction* func = &functions[i];
- compilation_units[i] =
- func->imported ? nullptr : new compiler::WasmCompilationUnit(
- thrower, isolate, &module_env, func, i);
- }
-}
+ private:
+ // sig_map_ maps signatures to an index in code_cache_.
+ wasm::SignatureMap sig_map_;
+ std::vector<Handle<Code>> code_cache_;
+};
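The cache is sound because a JS-to-wasm wrapper's body depends only on the callee's signature: on a hit it copies the cached code and patches nothing but the call target. A minimal standalone analogue of that signature-keyed reuse; Wrapper and WrapperCache are hypothetical illustrations, not the V8 classes:

#include <map>
#include <string>

struct Wrapper {
  std::string body;  // determined entirely by the signature
  int call_target;
};

class WrapperCache {
 public:
  Wrapper GetOrCompile(const std::string& sig, int call_target) {
    auto it = cache_.find(sig);
    if (it != cache_.end()) {
      Wrapper copy = it->second;       // reuse the compiled body...
      copy.call_target = call_target;  // ...patching only the call target
      return copy;
    }
    Wrapper fresh{"compiled-for:" + sig, call_target};  // expensive, done once
    cache_.emplace(sig, fresh);
    return fresh;
  }

 private:
  std::map<std::string, Wrapper> cache_;  // plays the role of SignatureMap
};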
-uint32_t* StartCompilationTasks(
- Isolate* isolate,
- std::vector<compiler::WasmCompilationUnit*>& compilation_units,
- std::queue<compiler::WasmCompilationUnit*>& executed_units,
- base::Semaphore* pending_tasks, base::Mutex& result_mutex,
- base::AtomicNumber<size_t>& next_unit) {
- const size_t num_tasks =
- Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
- uint32_t* task_ids = new uint32_t[num_tasks];
- for (size_t i = 0; i < num_tasks; ++i) {
- WasmCompilationTask* task =
- new WasmCompilationTask(isolate, &compilation_units, &executed_units,
- pending_tasks, &result_mutex, &next_unit);
- task_ids[i] = task->id();
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- }
- return task_ids;
-}
+// A helper for compiling an entire module.
+class CompilationHelper {
+ public:
+ CompilationHelper(Isolate* isolate, WasmModule* module)
+ : isolate_(isolate), module_(module) {}
+
+ // The actual runnable task that performs compilations in the background.
+ class CompilationTask : public CancelableTask {
+ public:
+ CompilationHelper* helper_;
+ explicit CompilationTask(CompilationHelper* helper)
+ : CancelableTask(helper->isolate_), helper_(helper) {}
+
+ void RunInternal() override {
+ while (helper_->FetchAndExecuteCompilationUnit()) {
+ }
+ helper_->module_->pending_tasks.get()->Signal();
+ }
+ };
-void WaitForCompilationTasks(Isolate* isolate, uint32_t* task_ids,
- base::Semaphore* pending_tasks) {
- const size_t num_tasks =
- Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
- for (size_t i = 0; i < num_tasks; ++i) {
- // If the task has not started yet, then we abort it. Otherwise we wait for
- // it to finish.
- if (isolate->cancelable_task_manager()->TryAbort(task_ids[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_tasks->Wait();
+ Isolate* isolate_;
+ WasmModule* module_;
+ std::vector<compiler::WasmCompilationUnit*> compilation_units_;
+ std::queue<compiler::WasmCompilationUnit*> executed_units_;
+ base::Mutex result_mutex_;
+ base::AtomicNumber<size_t> next_unit_;
+
+ // Run by each compilation task and by the main thread.
+ bool FetchAndExecuteCompilationUnit() {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+ DisallowCodeDependencyChange no_dependency_change;
+
+      // Subtract 1 because {Increment} returns the value after the atomic
+      // increment.
+ size_t index = next_unit_.Increment(1) - 1;
+ if (index >= compilation_units_.size()) {
+ return false;
+ }
+
+ compiler::WasmCompilationUnit* unit = compilation_units_.at(index);
+ if (unit != nullptr) {
+ unit->ExecuteCompilation();
+ base::LockGuard<base::Mutex> guard(&result_mutex_);
+ executed_units_.push(unit);
}
+ return true;
}
-}
-void FinishCompilationUnits(
- std::queue<compiler::WasmCompilationUnit*>& executed_units,
- std::vector<Handle<Code>>& results, base::Mutex& result_mutex) {
- while (true) {
- compiler::WasmCompilationUnit* unit = nullptr;
- {
- base::LockGuard<base::Mutex> guard(&result_mutex);
- if (executed_units.empty()) {
- break;
+ void InitializeParallelCompilation(const std::vector<WasmFunction>& functions,
+ ModuleBytesEnv& module_env,
+ ErrorThrower* thrower) {
+ compilation_units_.reserve(functions.size());
+ for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size();
+ ++i) {
+ const WasmFunction* func = &functions[i];
+ compilation_units_.push_back(
+ func->imported ? nullptr
+ : new compiler::WasmCompilationUnit(
+ thrower, isolate_, &module_env, func, i));
+ }
+ }
+
+ uint32_t* StartCompilationTasks() {
+ const size_t num_tasks =
+ Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+ uint32_t* task_ids = new uint32_t[num_tasks];
+ for (size_t i = 0; i < num_tasks; ++i) {
+ CompilationTask* task = new CompilationTask(this);
+ task_ids[i] = task->id();
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+ return task_ids;
+ }
+
+ void WaitForCompilationTasks(uint32_t* task_ids) {
+ const size_t num_tasks =
+ Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+ for (size_t i = 0; i < num_tasks; ++i) {
+      // If the task has not started yet, then we abort it. Otherwise we wait
+      // for it to finish.
+ if (isolate_->cancelable_task_manager()->TryAbort(task_ids[i]) !=
+ CancelableTaskManager::kTaskAborted) {
+ module_->pending_tasks.get()->Wait();
}
- unit = executed_units.front();
- executed_units.pop();
}
- int j = unit->index();
- results[j] = unit->FinishCompilation();
- delete unit;
}
-}
-void CompileInParallel(Isolate* isolate, ModuleBytesEnv* module_env,
- std::vector<Handle<Code>>& functions,
- ErrorThrower* thrower) {
- const WasmModule* module = module_env->module;
- // Data structures for the parallel compilation.
- std::vector<compiler::WasmCompilationUnit*> compilation_units(
- module->functions.size());
- std::queue<compiler::WasmCompilationUnit*> executed_units;
-
- //-----------------------------------------------------------------------
- // For parallel compilation:
- // 1) The main thread allocates a compilation unit for each wasm function
- // and stores them in the vector {compilation_units}.
- // 2) The main thread spawns {WasmCompilationTask} instances which run on
- // the background threads.
- // 3.a) The background threads and the main thread pick one compilation
- // unit at a time and execute the parallel phase of the compilation
- // unit. After finishing the execution of the parallel phase, the
- // result is enqueued in {executed_units}.
- // 3.b) If {executed_units} contains a compilation unit, the main thread
- // dequeues it and finishes the compilation.
- // 4) After the parallel phase of all compilation units has started, the
- // main thread waits for all {WasmCompilationTask} instances to finish.
- // 5) The main thread finishes the compilation.
-
- // Turn on the {CanonicalHandleScope} so that the background threads can
- // use the node cache.
- CanonicalHandleScope canonical(isolate);
-
- // 1) The main thread allocates a compilation unit for each wasm function
- // and stores them in the vector {compilation_units}.
- InitializeParallelCompilation(isolate, module->functions, compilation_units,
- *module_env, thrower);
-
- // Objects for the synchronization with the background threads.
- base::Mutex result_mutex;
- base::AtomicNumber<size_t> next_unit(
- static_cast<size_t>(FLAG_skip_compiling_wasm_funcs));
-
- // 2) The main thread spawns {WasmCompilationTask} instances which run on
- // the background threads.
- std::unique_ptr<uint32_t[]> task_ids(StartCompilationTasks(
- isolate, compilation_units, executed_units, module->pending_tasks.get(),
- result_mutex, next_unit));
-
- // 3.a) The background threads and the main thread pick one compilation
- // unit at a time and execute the parallel phase of the compilation
- // unit. After finishing the execution of the parallel phase, the
- // result is enqueued in {executed_units}.
- while (FetchAndExecuteCompilationUnit(isolate, &compilation_units,
- &executed_units, &result_mutex,
- &next_unit)) {
- // 3.b) If {executed_units} contains a compilation unit, the main thread
- // dequeues it and finishes the compilation unit. Compilation units
- // are finished concurrently to the background threads to save
- // memory.
- FinishCompilationUnits(executed_units, functions, result_mutex);
- }
- // 4) After the parallel phase of all compilation units has started, the
- // main thread waits for all {WasmCompilationTask} instances to finish.
- WaitForCompilationTasks(isolate, task_ids.get(), module->pending_tasks.get());
- // Finish the compilation of the remaining compilation units.
- FinishCompilationUnits(executed_units, functions, result_mutex);
-}
+ void FinishCompilationUnits(std::vector<Handle<Code>>& results) {
+ while (true) {
+ compiler::WasmCompilationUnit* unit = nullptr;
+ {
+ base::LockGuard<base::Mutex> guard(&result_mutex_);
+ if (executed_units_.empty()) {
+ break;
+ }
+ unit = executed_units_.front();
+ executed_units_.pop();
+ }
+ int j = unit->index();
+ results[j] = unit->FinishCompilation();
+ delete unit;
+ }
+ }
-void CompileSequentially(Isolate* isolate, ModuleBytesEnv* module_env,
- std::vector<Handle<Code>>& functions,
+ void CompileInParallel(ModuleBytesEnv* module_env,
+ std::vector<Handle<Code>>& results,
ErrorThrower* thrower) {
- DCHECK(!thrower->error());
-
- const WasmModule* module = module_env->module;
- for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
- i < module->functions.size(); ++i) {
- const WasmFunction& func = module->functions[i];
- if (func.imported) continue; // Imports are compiled at instantiation time.
-
- Handle<Code> code = Handle<Code>::null();
- // Compile the function.
- code = compiler::WasmCompilationUnit::CompileWasmFunction(
- thrower, isolate, module_env, &func);
- if (code.is_null()) {
- WasmName str = module_env->GetName(&func);
- thrower->CompileError("Compilation of #%d:%.*s failed.", i, str.length(),
- str.start());
- break;
- }
- // Install the code into the linker table.
- functions[i] = code;
+ const WasmModule* module = module_env->module_env.module;
+    // The data structures for the parallel compilation (the unit vector and
+    // the result queue) are members of this helper.
+
+ //-----------------------------------------------------------------------
+ // For parallel compilation:
+ // 1) The main thread allocates a compilation unit for each wasm function
+ // and stores them in the vector {compilation_units}.
+ // 2) The main thread spawns {CompilationTask} instances which run on
+ // the background threads.
+ // 3.a) The background threads and the main thread pick one compilation
+ // unit at a time and execute the parallel phase of the compilation
+ // unit. After finishing the execution of the parallel phase, the
+ // result is enqueued in {executed_units}.
+ // 3.b) If {executed_units} contains a compilation unit, the main thread
+ // dequeues it and finishes the compilation.
+ // 4) After the parallel phase of all compilation units has started, the
+ // main thread waits for all {CompilationTask} instances to finish.
+ // 5) The main thread finishes the compilation.
+
+ // Turn on the {CanonicalHandleScope} so that the background threads can
+ // use the node cache.
+ CanonicalHandleScope canonical(isolate_);
+
+ // 1) The main thread allocates a compilation unit for each wasm function
+ // and stores them in the vector {compilation_units}.
+ InitializeParallelCompilation(module->functions, *module_env, thrower);
+
+ // Objects for the synchronization with the background threads.
+ base::AtomicNumber<size_t> next_unit(
+ static_cast<size_t>(FLAG_skip_compiling_wasm_funcs));
+
+ // 2) The main thread spawns {CompilationTask} instances which run on
+ // the background threads.
+ std::unique_ptr<uint32_t[]> task_ids(StartCompilationTasks());
+
+ // 3.a) The background threads and the main thread pick one compilation
+ // unit at a time and execute the parallel phase of the compilation
+ // unit. After finishing the execution of the parallel phase, the
+ // result is enqueued in {executed_units}.
+ while (FetchAndExecuteCompilationUnit()) {
+ // 3.b) If {executed_units} contains a compilation unit, the main thread
+ // dequeues it and finishes the compilation unit. Compilation units
+ // are finished concurrently to the background threads to save
+ // memory.
+ FinishCompilationUnits(results);
+ }
+ // 4) After the parallel phase of all compilation units has started, the
+ // main thread waits for all {CompilationTask} instances to finish.
+ WaitForCompilationTasks(task_ids.get());
+ // Finish the compilation of the remaining compilation units.
+ FinishCompilationUnits(results);
+ }
+
+ void CompileSequentially(ModuleBytesEnv* module_env,
+ std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower) {
+ DCHECK(!thrower->error());
+
+ const WasmModule* module = module_env->module_env.module;
+ for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
+ i < module->functions.size(); ++i) {
+ const WasmFunction& func = module->functions[i];
+ if (func.imported)
+ continue; // Imports are compiled at instantiation time.
+
+ Handle<Code> code = Handle<Code>::null();
+ // Compile the function.
+ code = compiler::WasmCompilationUnit::CompileWasmFunction(
+ thrower, isolate_, module_env, &func);
+ if (code.is_null()) {
+ WasmName str = module_env->wire_bytes.GetName(&func);
+ thrower->CompileError("Compilation of #%d:%.*s failed.", i,
+ str.length(), str.start());
+ break;
+ }
+ results[i] = code;
+ }
}
-}
-
-int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
- DCHECK_EQ(static_cast<int>(kExprCallFunction), static_cast<int>(*pc));
- decoder.Reset(pc + 1, pc + 6);
- uint32_t call_idx = decoder.consume_u32v("call index");
- DCHECK(decoder.ok());
- DCHECK_GE(kMaxInt, call_idx);
- return static_cast<int>(call_idx);
-}
-
-int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
- size_t offset_l) {
- DCHECK_GE(kMaxInt, offset_l);
- int offset = static_cast<int>(offset_l);
- DCHECK(!iterator.done());
- int byte_pos;
- do {
- byte_pos = iterator.source_position().ScriptOffset();
- iterator.Advance();
- } while (!iterator.done() && iterator.code_offset() <= offset);
- return byte_pos;
-}
-
-void PatchContext(RelocIterator& it, Context* context) {
- Object* old = it.rinfo()->target_object();
- // The only context we use is the native context.
- DCHECK_IMPLIES(old->IsContext(), old->IsNativeContext());
- if (!old->IsNativeContext()) return;
- it.rinfo()->set_target_object(context, UPDATE_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
-}
-void PatchDirectCallsAndContext(Handle<FixedArray> new_functions,
- Handle<WasmCompiledModule> compiled_module,
- WasmModule* module, int start) {
- DisallowHeapAllocation no_gc;
- AllowDeferredHandleDereference embedding_raw_address;
- SeqOneByteString* module_bytes = compiled_module->module_bytes();
- std::vector<WasmFunction>* wasm_functions =
- &compiled_module->module()->functions;
- DCHECK_EQ(wasm_functions->size() +
- compiled_module->module()->num_exported_functions,
- new_functions->length());
- DCHECK_EQ(start, compiled_module->module()->num_imported_functions);
- Context* context = compiled_module->ptr_to_native_context();
- int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-
- // Allocate decoder outside of the loop and reuse it to decode all function
- // indexes.
- wasm::Decoder decoder(nullptr, nullptr);
- int num_wasm_functions = static_cast<int>(wasm_functions->size());
- int func_index = start;
- // Patch all wasm functions.
- for (; func_index < num_wasm_functions; ++func_index) {
- Code* wasm_function = Code::cast(new_functions->get(func_index));
- DCHECK(wasm_function->kind() == Code::WASM_FUNCTION);
- // Iterate simultaneously over the relocation information and the source
- // position table. For each call in the reloc info, move the source position
- // iterator forward to that position to find the byte offset of the
- // respective call. Then extract the call index from the module wire bytes
- // to find the new compiled function.
- SourcePositionTableIterator source_pos_iterator(
- wasm_function->source_position_table());
- const byte* func_bytes =
- module_bytes->GetChars() +
- compiled_module->module()->functions[func_index].code_start_offset;
- for (RelocIterator it(wasm_function, mode_mask); !it.done(); it.next()) {
- if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
- PatchContext(it, context);
- continue;
- }
- DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
- Code::Kind kind =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address())->kind();
- if (kind != Code::WASM_FUNCTION && kind != Code::WASM_TO_JS_FUNCTION)
- continue;
- size_t offset = it.rinfo()->pc() - wasm_function->instruction_start();
- int byte_pos =
- AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
- int called_func_index =
- ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
- Code* new_code = Code::cast(new_functions->get(called_func_index));
- it.rinfo()->set_target_address(new_code->instruction_start(),
- UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
- }
- }
- // Patch all exported functions.
- for (auto exp : module->export_table) {
- if (exp.kind != kExternalFunction) continue;
- Code* export_wrapper = Code::cast(new_functions->get(func_index));
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
- // There must be exactly one call to WASM_FUNCTION or WASM_TO_JS_FUNCTION.
- int num_wasm_calls = 0;
- for (RelocIterator it(export_wrapper, mode_mask); !it.done(); it.next()) {
- if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
- PatchContext(it, context);
- continue;
- }
- DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
- Code::Kind kind =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address())->kind();
- if (kind != Code::WASM_FUNCTION && kind != Code::WASM_TO_JS_FUNCTION)
- continue;
- ++num_wasm_calls;
- Code* new_code = Code::cast(new_functions->get(exp.index));
- DCHECK(new_code->kind() == Code::WASM_FUNCTION ||
- new_code->kind() == Code::WASM_TO_JS_FUNCTION);
- it.rinfo()->set_target_address(new_code->instruction_start(),
- UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
- }
- DCHECK_EQ(1, num_wasm_calls);
- func_index++;
- }
- DCHECK_EQ(new_functions->length(), func_index);
+ MaybeHandle<WasmModuleObject> CompileToModuleObject(
+ ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) {
+ Factory* factory = isolate_->factory();
+ // The {module_wrapper} will take ownership of the {WasmModule} object,
+ // and it will be destroyed when the GC reclaims the wrapper object.
+ Handle<WasmModuleWrapper> module_wrapper =
+ WasmModuleWrapper::New(isolate_, module_);
+ WasmInstance temp_instance(module_);
+ temp_instance.context = isolate_->native_context();
+ temp_instance.mem_size = WasmModule::kPageSize * module_->min_mem_pages;
+ temp_instance.mem_start = nullptr;
+ temp_instance.globals_start = nullptr;
+
+ // Initialize the indirect tables with placeholders.
+ int function_table_count =
+ static_cast<int>(module_->function_tables.size());
+ Handle<FixedArray> function_tables =
+ factory->NewFixedArray(function_table_count, TENURED);
+ Handle<FixedArray> signature_tables =
+ factory->NewFixedArray(function_table_count, TENURED);
+ for (int i = 0; i < function_table_count; ++i) {
+ temp_instance.function_tables[i] = factory->NewFixedArray(1, TENURED);
+ temp_instance.signature_tables[i] = factory->NewFixedArray(1, TENURED);
+ function_tables->set(i, *temp_instance.function_tables[i]);
+ signature_tables->set(i, *temp_instance.signature_tables[i]);
+ }
+
+ HistogramTimerScope wasm_compile_module_time_scope(
+ isolate_->counters()->wasm_compile_module_time());
+
+ ModuleBytesEnv module_env(module_, &temp_instance, wire_bytes);
+
+    // The {code_table} array contains import wrappers and functions (which
+    // are both included in {functions.size()}), and export wrappers.
+ int code_table_size = static_cast<int>(module_->functions.size() +
+ module_->num_exported_functions);
+ Handle<FixedArray> code_table =
+ factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
+
+    // Initialize the code table with the illegal builtin. All call sites
+    // will be patched at instantiation.
+ Handle<Code> illegal_builtin = isolate_->builtins()->Illegal();
+ for (uint32_t i = 0; i < module_->functions.size(); ++i) {
+ code_table->set(static_cast<int>(i), *illegal_builtin);
+ temp_instance.function_code[i] = illegal_builtin;
+ }
+
+ isolate_->counters()->wasm_functions_per_module()->AddSample(
+ static_cast<int>(module_->functions.size()));
+ CompilationHelper helper(isolate_, module_);
+ if (!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks != 0) {
+ // Avoid a race condition by collecting results into a second vector.
+ std::vector<Handle<Code>> results(temp_instance.function_code);
+ helper.CompileInParallel(&module_env, results, thrower);
+ temp_instance.function_code.swap(results);
+ } else {
+ helper.CompileSequentially(&module_env, temp_instance.function_code,
+ thrower);
+ }
+ if (thrower->error()) return {};
+
+ // At this point, compilation has completed. Update the code table.
+ for (size_t i = FLAG_skip_compiling_wasm_funcs;
+ i < temp_instance.function_code.size(); ++i) {
+ Code* code = *temp_instance.function_code[i];
+ code_table->set(static_cast<int>(i), code);
+ RecordStats(isolate_, code);
+ }
+
+    // Create heap objects for script, module bytes and asm.js offset table
+    // to be stored in the shared module data.
+ Handle<Script> script;
+ Handle<ByteArray> asm_js_offset_table;
+ if (asm_js_script.is_null()) {
+ script = CreateWasmScript(isolate_, wire_bytes);
+ } else {
+ script = asm_js_script;
+ asm_js_offset_table =
+ isolate_->factory()->NewByteArray(asm_js_offset_table_bytes.length());
+ asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
+ asm_js_offset_table_bytes.length());
+ }
+ // TODO(wasm): only save the sections necessary to deserialize a
+ // {WasmModule}. E.g. function bodies could be omitted.
+ Handle<String> module_bytes =
+ factory
+ ->NewStringFromOneByte({wire_bytes.start(), wire_bytes.length()},
+ TENURED)
+ .ToHandleChecked();
+ DCHECK(module_bytes->IsSeqOneByteString());
+
+ // Create the shared module data.
+ // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // only have one WasmSharedModuleData. Otherwise, we might only set
+ // breakpoints on a (potentially empty) subset of the instances.
+
+ Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
+ isolate_, module_wrapper, Handle<SeqOneByteString>::cast(module_bytes),
+ script, asm_js_offset_table);
+
+ // Create the compiled module object, and populate with compiled functions
+ // and information needed at instantiation time. This object needs to be
+ // serializable. Instantiation may occur off a deserialized version of this
+ // object.
+ Handle<WasmCompiledModule> compiled_module =
+ WasmCompiledModule::New(isolate_, shared);
+ compiled_module->set_num_imported_functions(
+ module_->num_imported_functions);
+ compiled_module->set_code_table(code_table);
+ compiled_module->set_min_mem_pages(module_->min_mem_pages);
+ compiled_module->set_max_mem_pages(module_->max_mem_pages);
+ if (function_table_count > 0) {
+ compiled_module->set_function_tables(function_tables);
+ compiled_module->set_signature_tables(signature_tables);
+ compiled_module->set_empty_function_tables(function_tables);
+ }
+
+ // If we created a wasm script, finish it now and make it public to the
+ // debugger.
+ if (asm_js_script.is_null()) {
+ script->set_wasm_compiled_module(*compiled_module);
+ isolate_->debug()->OnAfterCompile(script);
+ }
+
+ // Compile JS->WASM wrappers for exported functions.
+ JSToWasmWrapperCache js_to_wasm_cache;
+ int func_index = 0;
+ for (auto exp : module_->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Handle<Code> wasm_code(Code::cast(code_table->get(exp.index)), isolate_);
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(isolate_, module_,
+ wasm_code, exp.index);
+ int export_index =
+ static_cast<int>(module_->functions.size() + func_index);
+ code_table->set(export_index, *wrapper_code);
+ RecordStats(isolate_, *wrapper_code);
+ func_index++;
+ }
+
+ return WasmModuleObject::New(isolate_, compiled_module);
}
+};
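The numbered scheme in CompileInParallel reduces to an atomic cursor shared by the main thread and the background tasks: every participant claims the next unit index until the vector is exhausted. A self-contained sketch of that scheduling core, using standard threads in place of the V8 platform API:

#include <atomic>
#include <functional>
#include <thread>
#include <vector>

// Each unit stands for the parallel phase of one function's compilation.
void RunAllUnits(std::vector<std::function<void()>>& units, int num_threads) {
  std::atomic<size_t> next{0};
  auto worker = [&units, &next]() {
    for (;;) {
      // fetch_add returns the value *before* the increment, so unlike
      // next_unit_.Increment(1) above, no "- 1" correction is needed.
      size_t index = next.fetch_add(1);
      if (index >= units.size()) return;
      units[index]();
    }
  };
  std::vector<std::thread> background;
  for (int i = 1; i < num_threads; ++i) background.emplace_back(worker);
  worker();  // the main thread participates, as CompileInParallel does
  for (auto& t : background) t.join();
}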
static void ResetCompiledModule(Isolate* isolate, WasmInstanceObject* owner,
WasmCompiledModule* compiled_module) {
TRACE("Resetting %d\n", compiled_module->instance_id());
Object* undefined = *isolate->factory()->undefined_value();
- uint32_t old_mem_size = compiled_module->mem_size();
- uint32_t default_mem_size = compiled_module->default_mem_size();
- Object* mem_start = compiled_module->maybe_ptr_to_memory();
- Address old_mem_address = nullptr;
- Address globals_start =
- GetGlobalStartAddressFromCodeTemplate(undefined, owner);
-
- // Reset function tables.
- FixedArray* function_tables = nullptr;
- FixedArray* empty_function_tables = nullptr;
- if (compiled_module->has_function_tables()) {
- function_tables = compiled_module->ptr_to_function_tables();
- empty_function_tables = compiled_module->ptr_to_empty_function_tables();
- compiled_module->set_ptr_to_function_tables(empty_function_tables);
- }
-
- if (old_mem_size > 0) {
- CHECK_NE(mem_start, undefined);
- old_mem_address =
- static_cast<Address>(JSArrayBuffer::cast(mem_start)->backing_store());
- }
- int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-
- // Patch code to update memory references, global references, and function
- // table references.
Object* fct_obj = compiled_module->ptr_to_code_table();
- if (fct_obj != nullptr && fct_obj != undefined &&
- (old_mem_size > 0 || globals_start != nullptr || function_tables)) {
+ if (fct_obj != nullptr && fct_obj != undefined) {
+ uint32_t old_mem_size = compiled_module->mem_size();
+ uint32_t default_mem_size = compiled_module->default_mem_size();
+ Object* mem_start = compiled_module->maybe_ptr_to_memory();
+
+ // Patch code to update memory references, global references, and function
+ // table references.
+ Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+ CodeSpecialization code_specialization(isolate, &specialization_zone);
+
+ if (old_mem_size > 0) {
+ CHECK_NE(mem_start, undefined);
+ Address old_mem_address =
+ static_cast<Address>(JSArrayBuffer::cast(mem_start)->backing_store());
+ code_specialization.RelocateMemoryReferences(
+ old_mem_address, old_mem_size, nullptr, default_mem_size);
+ }
+
+ if (owner->has_globals_buffer()) {
+ Address globals_start =
+ static_cast<Address>(owner->globals_buffer()->backing_store());
+ code_specialization.RelocateGlobals(globals_start, nullptr);
+ }
+
+ // Reset function tables.
+ if (compiled_module->has_function_tables()) {
+ FixedArray* function_tables = compiled_module->ptr_to_function_tables();
+ FixedArray* empty_function_tables =
+ compiled_module->ptr_to_empty_function_tables();
+ DCHECK_EQ(function_tables->length(), empty_function_tables->length());
+ for (int i = 0, e = function_tables->length(); i < e; ++i) {
+ code_specialization.RelocateObject(
+ handle(function_tables->get(i), isolate),
+ handle(empty_function_tables->get(i), isolate));
+ }
+ compiled_module->set_ptr_to_function_tables(empty_function_tables);
+ }
+
FixedArray* functions = FixedArray::cast(fct_obj);
- for (int i = compiled_module->num_imported_functions();
- i < functions->length(); ++i) {
+ for (int i = compiled_module->num_imported_functions(),
+ end = functions->length();
+ i < end; ++i) {
Code* code = Code::cast(functions->get(i));
- bool changed = false;
- for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (RelocInfo::IsWasmMemoryReference(mode) ||
- RelocInfo::IsWasmMemorySizeReference(mode)) {
- it.rinfo()->update_wasm_memory_reference(
- old_mem_address, nullptr, old_mem_size, default_mem_size);
- changed = true;
- } else if (RelocInfo::IsWasmGlobalReference(mode)) {
- it.rinfo()->update_wasm_global_reference(globals_start, nullptr);
- changed = true;
- } else if (RelocInfo::IsEmbeddedObject(mode) && function_tables) {
- Object* old = it.rinfo()->target_object();
- for (int j = 0; j < function_tables->length(); ++j) {
- if (function_tables->get(j) == old) {
- it.rinfo()->set_target_object(empty_function_tables->get(j));
- changed = true;
- }
- }
+ if (code->kind() != Code::WASM_FUNCTION) {
+ // From here on, there should only be wrappers for exported functions.
+ for (; i < end; ++i) {
+ DCHECK_EQ(Code::JS_TO_WASM_FUNCTION,
+ Code::cast(functions->get(i))->kind());
}
+ break;
}
+ bool changed =
+ code_specialization.ApplyToWasmCode(code, SKIP_ICACHE_FLUSH);
+ // TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
+ // above.
if (changed) {
Assembler::FlushICache(isolate, code->instruction_start(),
code->instruction_size());
@@ -728,40 +737,33 @@ std::pair<int, int> GetFunctionOffsetAndLength(
return {static_cast<int>(func.code_start_offset),
static_cast<int>(func.code_end_offset - func.code_start_offset)};
}
+} // namespace
-Handle<Script> CreateWasmScript(Isolate* isolate,
- const ModuleWireBytes& wire_bytes) {
- Handle<Script> script =
- isolate->factory()->NewScript(isolate->factory()->empty_string());
- script->set_type(Script::TYPE_WASM);
-
- int hash = StringHasher::HashSequentialString(
- reinterpret_cast<const char*>(wire_bytes.module_bytes.start()),
- wire_bytes.module_bytes.length(), kZeroHashSeed);
-
- const int kBufferSize = 50;
- char buffer[kBufferSize];
- int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
- DCHECK(url_chars >= 0 && url_chars < kBufferSize);
- MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
- TENURED);
- script->set_source_url(*url_str.ToHandleChecked());
+Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
+ size_t size, bool is_external,
+ bool enable_guard_regions) {
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store,
+ static_cast<int>(size));
+ buffer->set_is_neuterable(false);
+ buffer->set_has_guard_region(enable_guard_regions);
- int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
- DCHECK(name_chars >= 0 && name_chars < kBufferSize);
- MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
- TENURED);
- script->set_name(*name_str.ToHandleChecked());
+ if (is_external) {
+ // We mark the buffer as external if we allocated it here with guard
+ // pages. That means we need to arrange for it to be freed.
- return script;
+ // TODO(eholk): Finalizers may not run when the main thread is shutting
+ // down, which means we may leak memory here.
+ Handle<Object> global_handle = isolate->global_handles()->Create(*buffer);
+ GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+ &MemoryFinalizer, v8::WeakCallbackType::kFinalizer);
+ }
+ return buffer;
}
-} // namespace
Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
bool enable_guard_regions) {
- if (size > (kV8MaxWasmMemoryPages * WasmModule::kPageSize)) {
+ if (size > (FLAG_wasm_max_mem_pages * WasmModule::kPageSize)) {
// TODO(titzer): lift restriction on maximum memory allocated here.
return Handle<JSArrayBuffer>::null();
}
@@ -784,57 +786,8 @@ Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
}
#endif
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(buffer, isolate, is_external, memory,
- static_cast<int>(size));
- buffer->set_is_neuterable(false);
- buffer->set_has_guard_region(enable_guard_regions);
-
- if (is_external) {
- // We mark the buffer as external if we allocated it here with guard
- // pages. That means we need to arrange for it to be freed.
-
- // TODO(eholk): Finalizers may not run when the main thread is shutting
- // down, which means we may leak memory here.
- Handle<Object> global_handle = isolate->global_handles()->Create(*buffer);
- GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
- &MemoryFinalizer, v8::WeakCallbackType::kFinalizer);
- }
-
- return buffer;
-}
-
-const char* wasm::SectionName(WasmSectionCode code) {
- switch (code) {
- case kUnknownSectionCode:
- return "Unknown";
- case kTypeSectionCode:
- return "Type";
- case kImportSectionCode:
- return "Import";
- case kFunctionSectionCode:
- return "Function";
- case kTableSectionCode:
- return "Table";
- case kMemorySectionCode:
- return "Memory";
- case kGlobalSectionCode:
- return "Global";
- case kExportSectionCode:
- return "Export";
- case kStartSectionCode:
- return "Start";
- case kCodeSectionCode:
- return "Code";
- case kElementSectionCode:
- return "Element";
- case kDataSectionCode:
- return "Data";
- case kNameSectionCode:
- return "Name";
- default:
- return "<unknown>";
- }
+ return SetupArrayBuffer(isolate, memory, size, is_external,
+ enable_guard_regions);
}
std::ostream& wasm::operator<<(std::ostream& os, const WasmModule& module) {
@@ -869,15 +822,17 @@ std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& name) {
}
WasmInstanceObject* wasm::GetOwningWasmInstance(Code* code) {
- DCHECK(code->kind() == Code::WASM_FUNCTION);
DisallowHeapAllocation no_gc;
+ DCHECK(code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_INTERPRETER_ENTRY);
FixedArray* deopt_data = code->deoptimization_data();
DCHECK_NOT_NULL(deopt_data);
- DCHECK_EQ(2, deopt_data->length());
+ DCHECK_EQ(code->kind() == Code::WASM_INTERPRETER_ENTRY ? 1 : 2,
+ deopt_data->length());
Object* weak_link = deopt_data->get(0);
DCHECK(weak_link->IsWeakCell());
WeakCell* cell = WeakCell::cast(weak_link);
- if (!cell->value()) return nullptr;
+ if (cell->cleared()) return nullptr;
return WasmInstanceObject::cast(cell->value());
}
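Switching to cell->cleared() makes the intent explicit: the WeakCell in the deoptimization data is a weak back-reference, and a cleared cell means the GC collected the owning instance. A rough standard-library analogue of the lookup (std::weak_ptr standing in for WeakCell; not V8 API):

#include <memory>

struct Instance { int id; };

// Code keeps only a weak link to its owning instance, like the WeakCell
// stored in the deoptimization data.
struct CodeObject {
  std::weak_ptr<Instance> owner;
};

// Empty result when the instance has been collected, mirroring the
// cell->cleared() early-out above.
std::shared_ptr<Instance> GetOwningInstance(const CodeObject& code) {
  return code.owner.lock();
}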
@@ -889,150 +844,6 @@ int wasm::GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
WasmModule::WasmModule(Zone* owned)
: owned_zone(owned), pending_tasks(new base::Semaphore(0)) {}
-MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
- Isolate* isolate, Handle<WasmModuleWrapper> module_wrapper,
- ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) const {
- Factory* factory = isolate->factory();
-
- MaybeHandle<WasmCompiledModule> nothing;
-
- WasmInstance temp_instance(this);
- temp_instance.context = isolate->native_context();
- temp_instance.mem_size = WasmModule::kPageSize * min_mem_pages;
- temp_instance.mem_start = nullptr;
- temp_instance.globals_start = nullptr;
-
- // Initialize the indirect tables with placeholders.
- int function_table_count = static_cast<int>(function_tables.size());
- Handle<FixedArray> function_tables =
- factory->NewFixedArray(function_table_count, TENURED);
- Handle<FixedArray> signature_tables =
- factory->NewFixedArray(function_table_count, TENURED);
- for (int i = 0; i < function_table_count; ++i) {
- temp_instance.function_tables[i] = factory->NewFixedArray(1, TENURED);
- temp_instance.signature_tables[i] = factory->NewFixedArray(1, TENURED);
- function_tables->set(i, *temp_instance.function_tables[i]);
- signature_tables->set(i, *temp_instance.signature_tables[i]);
- }
-
- HistogramTimerScope wasm_compile_module_time_scope(
- isolate->counters()->wasm_compile_module_time());
-
- ModuleBytesEnv module_env(this, &temp_instance, wire_bytes);
-
- // The {code_table} array contains import wrappers and functions (which
- // are both included in {functions.size()}, and export wrappers.
- int code_table_size =
- static_cast<int>(functions.size() + num_exported_functions);
- Handle<FixedArray> code_table =
- factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
-
- // Initialize the code table with placeholders.
- Handle<Code> code_placeholder =
- CreatePlaceholder(factory, Code::WASM_FUNCTION);
- for (uint32_t i = 0; i < functions.size(); ++i) {
- code_table->set(static_cast<int>(i), *code_placeholder);
- temp_instance.function_code[i] = code_placeholder;
- }
-
- isolate->counters()->wasm_functions_per_module()->AddSample(
- static_cast<int>(functions.size()));
- if (!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks != 0) {
- // Avoid a race condition by collecting results into a second vector.
- std::vector<Handle<Code>> results;
- results.reserve(temp_instance.function_code.size());
- for (size_t i = 0; i < temp_instance.function_code.size(); ++i) {
- results.push_back(temp_instance.function_code[i]);
- }
- CompileInParallel(isolate, &module_env, results, thrower);
-
- for (size_t i = 0; i < results.size(); ++i) {
- temp_instance.function_code[i] = results[i];
- }
- } else {
- CompileSequentially(isolate, &module_env, temp_instance.function_code,
- thrower);
- }
- if (thrower->error()) return nothing;
-
- // At this point, compilation has completed. Update the code table.
- for (size_t i = FLAG_skip_compiling_wasm_funcs;
- i < temp_instance.function_code.size(); ++i) {
- Code* code = *temp_instance.function_code[i];
- code_table->set(static_cast<int>(i), code);
- RecordStats(isolate, code);
- }
-
- // Create heap objects for script, module bytes and asm.js offset table to be
- // stored in the shared module data.
- Handle<Script> script;
- Handle<ByteArray> asm_js_offset_table;
- if (asm_js_script.is_null()) {
- script = CreateWasmScript(isolate, wire_bytes);
- } else {
- script = asm_js_script;
- asm_js_offset_table =
- isolate->factory()->NewByteArray(asm_js_offset_table_bytes.length());
- asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
- asm_js_offset_table_bytes.length());
- }
- // TODO(wasm): only save the sections necessary to deserialize a
- // {WasmModule}. E.g. function bodies could be omitted.
- Handle<String> module_bytes =
- factory->NewStringFromOneByte(wire_bytes.module_bytes, TENURED)
- .ToHandleChecked();
- DCHECK(module_bytes->IsSeqOneByteString());
-
- // Create the shared module data.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
- // only have one WasmSharedModuleData. Otherwise, we might only set
- // breakpoints on a (potentially empty) subset of the instances.
-
- Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
- isolate, module_wrapper, Handle<SeqOneByteString>::cast(module_bytes),
- script, asm_js_offset_table);
-
- // Create the compiled module object, and populate with compiled functions
- // and information needed at instantiation time. This object needs to be
- // serializable. Instantiation may occur off a deserialized version of this
- // object.
- Handle<WasmCompiledModule> ret = WasmCompiledModule::New(isolate, shared);
- ret->set_num_imported_functions(num_imported_functions);
- ret->set_code_table(code_table);
- ret->set_min_mem_pages(min_mem_pages);
- ret->set_max_mem_pages(max_mem_pages);
- if (function_table_count > 0) {
- ret->set_function_tables(function_tables);
- ret->set_signature_tables(signature_tables);
- ret->set_empty_function_tables(function_tables);
- }
-
- // If we created a wasm script, finish it now and make it public to the
- // debugger.
- if (asm_js_script.is_null()) {
- script->set_wasm_compiled_module(*ret);
- isolate->debug()->OnAfterCompile(script);
- }
-
- // Compile JS->WASM wrappers for exported functions.
- int func_index = 0;
- for (auto exp : export_table) {
- if (exp.kind != kExternalFunction) continue;
- Handle<Code> wasm_code =
- code_table->GetValueChecked<Code>(isolate, exp.index);
- Handle<Code> wrapper_code =
- compiler::CompileJSToWasmWrapper(isolate, this, wasm_code, exp.index);
- int export_index = static_cast<int>(functions.size() + func_index);
- code_table->set(export_index, *wrapper_code);
- RecordStats(isolate, *wrapper_code);
- func_index++;
- }
-
- return ret;
-}
-
static WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
Handle<Object> target) {
if (target->IsJSFunction()) {
@@ -1135,28 +946,29 @@ void wasm::UpdateDispatchTables(Isolate* isolate,
// A helper class to simplify instantiating a module from a compiled module.
// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
// etc.
-class WasmInstanceBuilder {
+class InstantiationHelper {
public:
- WasmInstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ InstantiationHelper(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
- Handle<JSReceiver> ffi, Handle<JSArrayBuffer> memory)
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory)
: isolate_(isolate),
module_(module_object->compiled_module()->module()),
thrower_(thrower),
module_object_(module_object),
- ffi_(ffi),
- memory_(memory) {}
+ ffi_(ffi.is_null() ? Handle<JSReceiver>::null()
+ : ffi.ToHandleChecked()),
+ memory_(memory.is_null() ? Handle<JSArrayBuffer>::null()
+ : memory.ToHandleChecked()) {}
// Build an instance, in all of its glory.
MaybeHandle<WasmInstanceObject> Build() {
- MaybeHandle<WasmInstanceObject> nothing;
-
// Check that an imports argument was provided, if the module requires it.
// No point in continuing otherwise.
if (!module_->import_table.empty() && ffi_.is_null()) {
thrower_->TypeError(
"Imports argument must be present and must be an object");
- return nothing;
+ return {};
}
HistogramTimerScope wasm_instantiate_module_time_scope(
@@ -1236,6 +1048,8 @@ class WasmInstanceBuilder {
//--------------------------------------------------------------------------
// Allocate the instance object.
//--------------------------------------------------------------------------
+ Zone instantiation_zone(isolate_->allocator(), ZONE_NAME);
+ CodeSpecialization code_specialization(isolate_, &instantiation_zone);
Handle<WasmInstanceObject> instance =
WasmInstanceObject::New(isolate_, compiled_module_);
@@ -1251,14 +1065,17 @@ class WasmInstanceBuilder {
globals_ = global_buffer;
if (globals_.is_null()) {
thrower_->RangeError("Out of memory: wasm globals");
- return nothing;
+ return {};
+ }
+ Address old_globals_start = nullptr;
+ if (!owner.is_null()) {
+ DCHECK(owner.ToHandleChecked()->has_globals_buffer());
+ old_globals_start = static_cast<Address>(
+ owner.ToHandleChecked()->globals_buffer()->backing_store());
}
- Address old_address =
- owner.is_null() ? nullptr : GetGlobalStartAddressFromCodeTemplate(
- isolate_->heap()->undefined_value(),
- *owner.ToHandleChecked());
- RelocateGlobals(code_table, old_address,
- static_cast<Address>(global_buffer->backing_store()));
+ Address new_globals_start =
+ static_cast<Address>(global_buffer->backing_store());
+ code_specialization.RelocateGlobals(old_globals_start, new_globals_start);
instance->set_globals_buffer(*global_buffer);
}
@@ -1278,7 +1095,7 @@ class WasmInstanceBuilder {
// Process the imports for the module.
//--------------------------------------------------------------------------
int num_imported_functions = ProcessImports(code_table, instance);
- if (num_imported_functions < 0) return nothing;
+ if (num_imported_functions < 0) return {};
//--------------------------------------------------------------------------
// Process the initialization for the module's globals.
@@ -1288,7 +1105,8 @@ class WasmInstanceBuilder {
//--------------------------------------------------------------------------
// Set up the indirect function tables for the new instance.
//--------------------------------------------------------------------------
- if (function_table_count > 0) InitializeTables(code_table, instance);
+ if (function_table_count > 0)
+ InitializeTables(code_table, instance, &code_specialization);
//--------------------------------------------------------------------------
// Set up the memory for the new instance.
@@ -1306,7 +1124,7 @@ class WasmInstanceBuilder {
memory_->has_guard_region());
} else if (min_mem_pages > 0) {
memory_ = AllocateMemory(min_mem_pages);
- if (memory_.is_null()) return nothing; // failed to allocate memory
+ if (memory_.is_null()) return {}; // failed to allocate memory
}
//--------------------------------------------------------------------------
@@ -1320,7 +1138,7 @@ class WasmInstanceBuilder {
if (!in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
table_size)) {
thrower_->LinkError("table initializer is out of bounds");
- return nothing;
+ return {};
}
}
@@ -1333,7 +1151,7 @@ class WasmInstanceBuilder {
? 0 : static_cast<uint32_t>(memory_->byte_length()->Number());
if (!in_bounds(base, seg.source_size, mem_size)) {
thrower_->LinkError("data segment is out of bounds");
- return nothing;
+ return {};
}
}
@@ -1345,7 +1163,7 @@ class WasmInstanceBuilder {
Address mem_start = static_cast<Address>(memory_->backing_store());
uint32_t mem_size =
static_cast<uint32_t>(memory_->byte_length()->Number());
- if (!LoadDataSegments(mem_start, mem_size)) return nothing;
+ LoadDataSegments(mem_start, mem_size);
uint32_t old_mem_size = compiled_module_->mem_size();
Address old_mem_start =
@@ -1353,9 +1171,12 @@ class WasmInstanceBuilder {
? static_cast<Address>(
compiled_module_->memory()->backing_store())
: nullptr;
- RelocateMemoryReferencesInCode(
- code_table, module_->num_imported_functions, old_mem_start, mem_start,
- old_mem_size, mem_size);
+ // We might get instantiated again with the same memory. No patching
+ // needed in this case.
+ if (old_mem_start != mem_start || old_mem_size != mem_size) {
+ code_specialization.RelocateMemoryReferences(
+ old_mem_start, old_mem_size, mem_start, mem_size);
+ }
compiled_module_->set_memory(memory_);
}
@@ -1394,16 +1215,18 @@ class WasmInstanceBuilder {
//--------------------------------------------------------------------------
if (function_table_count > 0) LoadTableSegments(code_table, instance);
- // Patch new call sites and the context.
- PatchDirectCallsAndContext(code_table, compiled_module_, module_,
- num_imported_functions);
+ // Patch all code with the relocations registered in code_specialization.
+ {
+ code_specialization.RelocateDirectCalls(instance);
+ code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
+ }
FlushICache(isolate_, code_table);
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
//--------------------------------------------------------------------------
- {
+ if (FLAG_wasm_trap_handler) {
for (int i = 0; i < code_table->length(); ++i) {
Handle<Code> code = code_table->GetValueChecked<Code>(isolate_, i);
@@ -1411,21 +1234,17 @@ class WasmInstanceBuilder {
continue;
}
- FixedArray* protected_instructions = code->protected_instructions();
- DCHECK(protected_instructions != nullptr);
+ const intptr_t base = reinterpret_cast<intptr_t>(code->entry());
+
Zone zone(isolate_->allocator(), "Wasm Module");
ZoneVector<trap_handler::ProtectedInstructionData> unpacked(&zone);
- for (int i = 0; i < protected_instructions->length();
- i += Code::kTrapDataSize) {
+ const int mode_mask =
+ RelocInfo::ModeMask(RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
trap_handler::ProtectedInstructionData data;
- data.instr_offset =
- protected_instructions
- ->GetValueChecked<Smi>(isolate_, i + Code::kTrapCodeOffset)
- ->value();
+ data.instr_offset = it.rinfo()->data();
data.landing_offset =
- protected_instructions
- ->GetValueChecked<Smi>(isolate_, i + Code::kTrapLandingOffset)
- ->value();
+ reinterpret_cast<intptr_t>(it.rinfo()->pc()) - base;
unpacked.emplace_back(data);
}
// TODO(eholk): Register the protected instruction information once the
@@ -1469,6 +1288,13 @@ class WasmInstanceBuilder {
v8::WeakCallbackType::kFinalizer);
}
}
+
+ //--------------------------------------------------------------------------
+ // Set all breakpoints that were set on the shared module.
+ //--------------------------------------------------------------------------
+ WasmSharedModuleData::SetBreakpointsOnNewInstance(
+ compiled_module_->shared(), instance);
+
//--------------------------------------------------------------------------
// Run the start function if one was specified.
//--------------------------------------------------------------------------
@@ -1478,8 +1304,9 @@ class WasmInstanceBuilder {
Handle<Code> startup_code =
code_table->GetValueChecked<Code>(isolate_, start_index);
FunctionSig* sig = module_->functions[start_index].sig;
- Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
- isolate_, module_, startup_code, start_index);
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
+ isolate_, module_, startup_code, start_index);
Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
isolate_, instance, MaybeHandle<String>(), start_index,
static_cast<int>(sig->parameter_count()), wrapper_code);
@@ -1496,7 +1323,7 @@ class WasmInstanceBuilder {
// chain. However, we need to set up everything before executing the
// start function, such that stack trace information can be generated
// correctly already in the start function.
- return nothing;
+ return {};
}
}
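// CloneOrCompileJSToWasmWrapper, used above and again during export
// processing, presumably compiles one wrapper per distinct signature and
// clones it for further functions. A sketch of such a cache under that
// assumption; the map key and the cloning shortcut are illustrative, not
// the actual implementation. The cache lives inside the builder, so the
// handles stay valid for the builder's HandleScope.
class JSToWasmWrapperCacheSketch {
 public:
  Handle<Code> CloneOrCompile(Isolate* isolate, WasmModule* module,
                              Handle<Code> wasm_code, uint32_t index) {
    FunctionSig* sig = module->functions[index].sig;
    auto it = cache_.find(sig);
    if (it == cache_.end()) {
      Handle<Code> code = compiler::CompileJSToWasmWrapper(isolate, module,
                                                           wasm_code, index);
      cache_[sig] = code;
      return code;
    }
    // A real implementation would also patch the clone's call target to
    // point at wasm_code; elided here.
    return isolate->factory()->CopyCode(it->second);
  }

 private:
  std::unordered_map<FunctionSig*, Handle<Code>> cache_;
};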
@@ -1519,12 +1346,13 @@ class WasmInstanceBuilder {
WasmModule* const module_;
ErrorThrower* thrower_;
Handle<WasmModuleObject> module_object_;
- Handle<JSReceiver> ffi_;
- Handle<JSArrayBuffer> memory_;
+ Handle<JSReceiver> ffi_; // TODO(titzer): Use MaybeHandle
+ Handle<JSArrayBuffer> memory_; // TODO(titzer): Use MaybeHandle
Handle<JSArrayBuffer> globals_;
Handle<WasmCompiledModule> compiled_module_;
std::vector<TableInstance> table_instances_;
std::vector<Handle<JSFunction>> js_wrappers_;
+ JSToWasmWrapperCache js_to_wasm_cache_;
// Helper routines to print out errors with imports.
void ReportLinkError(const char* error, uint32_t index,
@@ -1593,7 +1421,7 @@ class WasmInstanceBuilder {
}
// Load data segments into the memory.
- bool LoadDataSegments(Address mem_addr, size_t mem_size) {
+ void LoadDataSegments(Address mem_addr, size_t mem_size) {
Handle<SeqOneByteString> module_bytes(compiled_module_->module_bytes(),
isolate_);
for (const WasmDataSegment& segment : module_->data_segments) {
@@ -1608,7 +1436,6 @@ class WasmInstanceBuilder {
module_bytes->GetCharsAddress() + segment.source_offset);
memcpy(dest, src, source_size);
}
- return true;
}
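// Condensed sketch of the per-segment copy performed above: the destination
// is the linear-memory base plus the segment's destination offset, the
// source a range inside the module wire bytes. Bounds validation is assumed
// to have happened at decode/instantiation time; dest_offset stands for the
// evaluated value of the segment's init expression.
void CopyOneSegment(Address mem_addr, uint32_t dest_offset,
                    const WasmDataSegment& segment,
                    Handle<SeqOneByteString> module_bytes) {
  byte* dest = mem_addr + dest_offset;
  const byte* src = reinterpret_cast<const byte*>(
      module_bytes->GetCharsAddress() + segment.source_offset);
  memcpy(dest, src, segment.source_size);
}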
void WriteGlobalValue(WasmGlobal& global, Handle<Object> value) {
@@ -1873,7 +1700,7 @@ class WasmInstanceBuilder {
// Allocate memory for a module instance as a new JSArrayBuffer.
Handle<JSArrayBuffer> AllocateMemory(uint32_t min_mem_pages) {
- if (min_mem_pages > kV8MaxWasmMemoryPages) {
+ if (min_mem_pages > FLAG_wasm_max_mem_pages) {
thrower_->RangeError("Out of memory: wasm memory too large");
return Handle<JSArrayBuffer>::null();
}
@@ -1909,20 +1736,30 @@ class WasmInstanceBuilder {
Handle<JSFunction>::null());
}
- Handle<JSObject> exports_object = instance;
+ Handle<JSObject> exports_object;
if (module_->origin == kWasmOrigin) {
// Create the "exports" object.
+ exports_object = isolate_->factory()->NewJSObjectWithNullProto();
+ } else if (module_->origin == kAsmJsOrigin) {
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate_->native_context()->object_function(), isolate_);
- exports_object =
- isolate_->factory()->NewJSObject(object_function, TENURED);
- Handle<String> exports_name =
- isolate_->factory()->InternalizeUtf8String("exports");
- JSObject::AddProperty(instance, exports_name, exports_object, NONE);
+ exports_object = isolate_->factory()->NewJSObject(object_function);
+ } else {
+ UNREACHABLE();
}
+ Handle<String> exports_name =
+ isolate_->factory()->InternalizeUtf8String("exports");
+ JSObject::AddProperty(instance, exports_name, exports_object, NONE);
+
+ Handle<String> foreign_init_name =
+ isolate_->factory()->InternalizeUtf8String(
+ wasm::AsmWasmBuilder::foreign_init_name);
+ Handle<String> single_function_name =
+ isolate_->factory()->InternalizeUtf8String(
+ wasm::AsmWasmBuilder::single_function_name);
PropertyDescriptor desc;
- desc.set_writable(false);
+ desc.set_writable(module_->origin == kAsmJsOrigin);
desc.set_enumerable(true);
// Count up export indexes.
@@ -1951,6 +1788,15 @@ class WasmInstanceBuilder {
WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
isolate_, compiled_module_, exp.name_offset, exp.name_length)
.ToHandleChecked();
+ Handle<JSObject> export_to;
+ if (module_->origin == kAsmJsOrigin && exp.kind == kExternalFunction &&
+ (String::Equals(name, foreign_init_name) ||
+ String::Equals(name, single_function_name))) {
+ export_to = instance;
+ } else {
+ export_to = exports_object;
+ }
+
switch (exp.kind) {
case kExternalFunction: {
// Wrap and export the code as a JSFunction.
@@ -1989,7 +1835,7 @@ class WasmInstanceBuilder {
module_->function_tables[exp.index];
if (table_instance.table_object.is_null()) {
uint32_t maximum =
- table.has_max ? table.max_size : kV8MaxWasmTableSize;
+ table.has_max ? table.max_size : FLAG_wasm_max_table_size;
table_instance.table_object = WasmTableObject::New(
isolate_, table.min_size, maximum, &table_instance.js_wrappers);
}
@@ -2047,14 +1893,13 @@ class WasmInstanceBuilder {
// Skip duplicates for asm.js.
if (module_->origin == kAsmJsOrigin) {
- v8::Maybe<bool> status =
- JSReceiver::HasOwnProperty(exports_object, name);
+ v8::Maybe<bool> status = JSReceiver::HasOwnProperty(export_to, name);
if (status.FromMaybe(false)) {
continue;
}
}
v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
- isolate_, exports_object, name, &desc, Object::THROW_ON_ERROR);
+ isolate_, export_to, name, &desc, Object::THROW_ON_ERROR);
if (!status.IsJust()) {
thrower_->LinkError("export of %.*s failed.", name->length(),
name->ToCString().get());
@@ -2071,7 +1916,8 @@ class WasmInstanceBuilder {
}
void InitializeTables(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance) {
+ Handle<WasmInstanceObject> instance,
+ CodeSpecialization* code_specialization) {
int function_table_count =
static_cast<int>(module_->function_tables.size());
Handle<FixedArray> new_function_tables =
@@ -2099,8 +1945,8 @@ class WasmInstanceBuilder {
// Table is imported, patch table bounds check
DCHECK(table_size <= table_instance.function_table->length());
if (table_size < table_instance.function_table->length()) {
- RelocateTableSizeReferences(code_table, table_size,
- table_instance.function_table->length());
+ code_specialization->PatchTableSize(
+ table_size, table_instance.function_table->length());
}
}
@@ -2110,23 +1956,23 @@ class WasmInstanceBuilder {
*table_instance.signature_table);
}
- // Patch all code that has references to the old indirect tables.
- Handle<FixedArray> old_function_tables =
- compiled_module_->function_tables();
- Handle<FixedArray> old_signature_tables =
- compiled_module_->signature_tables();
- for (int i = 0; i < code_table->length(); ++i) {
- if (!code_table->get(i)->IsCode()) continue;
- Handle<Code> code(Code::cast(code_table->get(i)), isolate_);
- for (int j = 0; j < function_table_count; ++j) {
- ReplaceReferenceInCode(
- code, Handle<Object>(old_function_tables->get(j), isolate_),
- Handle<Object>(new_function_tables->get(j), isolate_));
- ReplaceReferenceInCode(
- code, Handle<Object>(old_signature_tables->get(j), isolate_),
- Handle<Object>(new_signature_tables->get(j), isolate_));
- }
+ FixedArray* old_function_tables =
+ compiled_module_->ptr_to_function_tables();
+ DCHECK_EQ(old_function_tables->length(), new_function_tables->length());
+ for (int i = 0, e = new_function_tables->length(); i < e; ++i) {
+ code_specialization->RelocateObject(
+ handle(old_function_tables->get(i), isolate_),
+ handle(new_function_tables->get(i), isolate_));
}
+ FixedArray* old_signature_tables =
+ compiled_module_->ptr_to_signature_tables();
+ DCHECK_EQ(old_signature_tables->length(), new_signature_tables->length());
+ for (int i = 0, e = new_signature_tables->length(); i < e; ++i) {
+ code_specialization->RelocateObject(
+ handle(old_signature_tables->get(i), isolate_),
+ handle(new_signature_tables->get(i), isolate_));
+ }
+
compiled_module_->set_function_tables(new_function_tables);
compiled_module_->set_signature_tables(new_signature_tables);
}
@@ -2173,14 +2019,10 @@ class WasmInstanceBuilder {
      // TODO(titzer): We compile JS->WASM wrappers for functions that are
// not exported but are in an exported table. This should be done
// at module compile time and cached instead.
- WasmInstance temp_instance(module_);
- temp_instance.context = isolate_->native_context();
- temp_instance.mem_size = 0;
- temp_instance.mem_start = nullptr;
- temp_instance.globals_start = nullptr;
-
- Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
- isolate_, module_, wasm_code, func_index);
+
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
+ isolate_, module_, wasm_code, func_index);
MaybeHandle<String> func_name;
if (module_->origin == kAsmJsOrigin) {
// For modules arising from asm.js, honor the names section.
@@ -2219,16 +2061,6 @@ class WasmInstanceBuilder {
}
};
-// Instantiates a WASM module, creating a WebAssembly.Instance from a
-// WebAssembly.Module.
-MaybeHandle<WasmInstanceObject> WasmModule::Instantiate(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> wasm_module, Handle<JSReceiver> ffi,
- Handle<JSArrayBuffer> memory) {
- WasmInstanceBuilder builder(isolate, thrower, wasm_module, ffi, memory);
- return builder.Build();
-}
-
bool wasm::IsWasmInstance(Object* object) {
return WasmInstanceObject::IsWasmInstanceObject(object);
}
@@ -2244,57 +2076,6 @@ bool wasm::IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
isolate->allow_code_gen_callback()(v8::Utils::ToLocal(context));
}
-// TODO(clemensh): origin can be inferred from asm_js_script; remove it.
-MaybeHandle<WasmModuleObject> wasm::CreateModuleObjectFromBytes(
- Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
- ModuleOrigin origin, Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
- MaybeHandle<WasmModuleObject> nothing;
-
- if (origin != kAsmJsOrigin &&
- !IsWasmCodegenAllowed(isolate, isolate->native_context())) {
- thrower->CompileError("Wasm code generation disallowed in this context");
- return nothing;
- }
-
- ModuleResult result = DecodeWasmModule(isolate, start, end, false, origin);
- if (result.failed()) {
- if (result.val) delete result.val;
- thrower->CompileFailed("Wasm decoding failed", result);
- return nothing;
- }
-
- // The {module_wrapper} will take ownership of the {WasmModule} object,
- // and it will be destroyed when the GC reclaims the wrapper object.
- Handle<WasmModuleWrapper> module_wrapper =
- WasmModuleWrapper::New(isolate, const_cast<WasmModule*>(result.val));
-
- // Compile the functions of the module, producing a compiled module.
- MaybeHandle<WasmCompiledModule> maybe_compiled_module =
- result.val->CompileFunctions(isolate, module_wrapper, thrower,
- ModuleWireBytes(start, end), asm_js_script,
- asm_js_offset_table_bytes);
-
- if (maybe_compiled_module.is_null()) return nothing;
-
- Handle<WasmCompiledModule> compiled_module =
- maybe_compiled_module.ToHandleChecked();
-
- return WasmModuleObject::New(isolate, compiled_module);
-}
-
-bool wasm::ValidateModuleBytes(Isolate* isolate, const byte* start,
- const byte* end, ErrorThrower* thrower,
- ModuleOrigin origin) {
- ModuleResult result = DecodeWasmModule(isolate, start, end, true, origin);
- if (result.val) {
- delete result.val;
- } else {
- DCHECK(!result.ok());
- }
- return result.ok();
-}
-
MaybeHandle<JSArrayBuffer> wasm::GetInstanceMemory(
Isolate* isolate, Handle<WasmInstanceObject> object) {
auto instance = Handle<WasmInstanceObject>::cast(object);
@@ -2330,14 +2111,14 @@ uint32_t GetMaxInstanceMemoryPages(Isolate* isolate,
Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate);
if (memory_object->has_maximum_pages()) {
uint32_t maximum = static_cast<uint32_t>(memory_object->maximum_pages());
- if (maximum < kV8MaxWasmMemoryPages) return maximum;
+ if (maximum < FLAG_wasm_max_mem_pages) return maximum;
}
}
uint32_t compiled_max_pages = instance->compiled_module()->max_mem_pages();
isolate->counters()->wasm_max_mem_pages_count()->AddSample(
compiled_max_pages);
if (compiled_max_pages != 0) return compiled_max_pages;
- return kV8MaxWasmMemoryPages;
+ return FLAG_wasm_max_mem_pages;
}
Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
@@ -2346,7 +2127,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer;
Address old_mem_start = nullptr;
uint32_t old_size = 0;
- if (buffer.ToHandle(&old_buffer) && old_buffer->backing_store() != nullptr) {
+ if (buffer.ToHandle(&old_buffer) && old_buffer->backing_store() != nullptr &&
+ old_buffer->byte_length()->IsNumber()) {
old_mem_start = static_cast<Address>(old_buffer->backing_store());
DCHECK_NOT_NULL(old_mem_start);
old_size = old_buffer->byte_length()->Number();
@@ -2355,29 +2137,20 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
std::numeric_limits<uint32_t>::max());
uint32_t new_size = old_size + pages * WasmModule::kPageSize;
if (new_size <= old_size || max_pages * WasmModule::kPageSize < new_size ||
- kV8MaxWasmMemoryPages * WasmModule::kPageSize < new_size) {
+ FLAG_wasm_max_mem_pages * WasmModule::kPageSize < new_size) {
return Handle<JSArrayBuffer>::null();
}
- Handle<JSArrayBuffer> new_buffer;
- if (!old_buffer.is_null() && old_buffer->has_guard_region()) {
- // We don't move the backing store, we simply change the protection to make
- // more of it accessible.
- base::OS::Unprotect(old_buffer->backing_store(), new_size);
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(pages * WasmModule::kPageSize);
- Handle<Object> new_size_object =
- isolate->factory()->NewNumberFromSize(new_size);
- old_buffer->set_byte_length(*new_size_object);
- new_buffer = old_buffer;
- } else {
- const bool enable_guard_regions = false;
- new_buffer = NewArrayBuffer(isolate, new_size, enable_guard_regions);
- if (new_buffer.is_null()) return new_buffer;
- Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
- if (old_size != 0) {
- memcpy(new_mem_start, old_mem_start, old_size);
- }
+ // TODO(gdeepti): Change the protection here instead of allocating a new
+ // buffer before guard regions are turned on, see issue #5886.
+ const bool enable_guard_regions =
+ !old_buffer.is_null() && old_buffer->has_guard_region();
+ Handle<JSArrayBuffer> new_buffer =
+ NewArrayBuffer(isolate, new_size, enable_guard_regions);
+ if (new_buffer.is_null()) return new_buffer;
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ if (old_size != 0) {
+ memcpy(new_mem_start, old_mem_start, old_size);
}
return new_buffer;
}
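// Standalone sketch of the size checks above. All arithmetic is uint32, so
// wrap-around makes new_size <= old_size for pages > 0, and one comparison
// rejects both overflow and zero growth; pages * kPageSize itself is assumed
// pre-validated (the surrounding code guards it with a DCHECK).
#include <cstdint>
bool CanGrow(uint32_t old_size, uint32_t pages, uint32_t max_pages) {
  const uint32_t kPageSize = 64 * 1024;  // WasmModule::kPageSize
  uint32_t new_size = old_size + pages * kPageSize;
  if (new_size <= old_size) return false;              // overflow or no growth
  if (max_pages * kPageSize < new_size) return false;  // beyond declared max
  return true;
}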
@@ -2386,15 +2159,42 @@ void UncheckedUpdateInstanceMemory(Isolate* isolate,
Handle<WasmInstanceObject> instance,
Address old_mem_start, uint32_t old_size) {
DCHECK(instance->has_memory_buffer());
- Handle<JSArrayBuffer> new_buffer(instance->memory_buffer());
- uint32_t new_size = new_buffer->byte_length()->Number();
- DCHECK(new_size <= std::numeric_limits<uint32_t>::max());
- Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ Handle<JSArrayBuffer> mem_buffer(instance->memory_buffer());
+ uint32_t new_size = mem_buffer->byte_length()->Number();
+ Address new_mem_start = static_cast<Address>(mem_buffer->backing_store());
DCHECK_NOT_NULL(new_mem_start);
- Handle<FixedArray> code_table = instance->compiled_module()->code_table();
- RelocateMemoryReferencesInCode(
- code_table, instance->compiled_module()->module()->num_imported_functions,
- old_mem_start, new_mem_start, old_size, new_size);
+ Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+ CodeSpecialization code_specialization(isolate, &specialization_zone);
+ code_specialization.RelocateMemoryReferences(old_mem_start, old_size,
+ new_mem_start, new_size);
+ code_specialization.ApplyToWholeInstance(*instance);
+}
+
+void wasm::DetachWebAssemblyMemoryBuffer(Isolate* isolate,
+ Handle<JSArrayBuffer> buffer) {
+  // Check for a null handle before dereferencing it.
+  if (buffer.is_null()) return;
+  const int64_t byte_length =
+      buffer->byte_length()->IsNumber()
+          ? static_cast<uint32_t>(buffer->byte_length()->Number())
+          : 0;
+  if (byte_length == 0) return;
+ const bool has_guard_regions = buffer->has_guard_region();
+ const bool is_external = buffer->is_external();
+ void* backing_store = buffer->backing_store();
+ DCHECK(!buffer->is_neuterable());
+ if (!has_guard_regions && !is_external) {
+ buffer->set_is_external(true);
+ isolate->heap()->UnregisterArrayBuffer(*buffer);
+ }
+ buffer->set_is_neuterable(true);
+ buffer->Neuter();
+ if (has_guard_regions) {
+ base::OS::Free(backing_store, RoundUp(i::wasm::kWasmMaxHeapOffset,
+ base::OS::CommitPageSize()));
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(-byte_length);
+ } else if (!has_guard_regions && !is_external) {
+ isolate->array_buffer_allocator()->Free(backing_store, byte_length);
+ }
}
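// The ordering above matters; for a non-guarded, non-external buffer:
//   1. set_is_external(true) + UnregisterArrayBuffer(): the GC gives up
//      ownership of the backing store, so neutering will not free it.
//   2. set_is_neuterable(true) + Neuter(): the JS-visible buffer detaches
//      and its length drops to zero.
//   3. array_buffer_allocator()->Free(...): the store is released manually.
// For guard-region buffers the whole reserved region (kWasmMaxHeapOffset
// rounded up to the commit page size) is returned via base::OS::Free.
// Hypothetical call site, e.g. when growth replaces the backing buffer:
wasm::DetachWebAssemblyMemoryBuffer(isolate, old_buffer);
DCHECK(old_buffer->was_neutered());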
int32_t wasm::GrowWebAssemblyMemory(Isolate* isolate,
@@ -2407,25 +2207,41 @@ int32_t wasm::GrowWebAssemblyMemory(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer;
uint32_t old_size = 0;
Address old_mem_start = nullptr;
+  // Force byte_length to 0 if it fails the IsNumber() check.
if (memory_buffer.ToHandle(&old_buffer) &&
- old_buffer->backing_store() != nullptr) {
+ old_buffer->backing_store() != nullptr &&
+ old_buffer->byte_length()->IsNumber()) {
old_size = old_buffer->byte_length()->Number();
old_mem_start = static_cast<Address>(old_buffer->backing_store());
}
+ Handle<JSArrayBuffer> new_buffer;
  // Return the current size if growing by 0 pages.
if (pages == 0) {
+ if (!old_buffer.is_null() && old_buffer->backing_store() != nullptr) {
+ new_buffer = SetupArrayBuffer(isolate, old_buffer->backing_store(),
+ old_size, old_buffer->is_external(),
+ old_buffer->has_guard_region());
+ memory_object->set_buffer(*new_buffer);
+ old_buffer->set_is_neuterable(true);
+ if (!old_buffer->has_guard_region()) {
+ old_buffer->set_is_external(true);
+ isolate->heap()->UnregisterArrayBuffer(*old_buffer);
+ }
+ // Neuter but don't free the memory because it is now being used by
+ // new_buffer.
+ old_buffer->Neuter();
+ }
DCHECK(old_size % WasmModule::kPageSize == 0);
return (old_size / WasmModule::kPageSize);
}
- Handle<JSArrayBuffer> new_buffer;
if (!memory_object->has_instances_link()) {
// Memory object does not have an instance associated with it, just grow
uint32_t max_pages;
if (memory_object->has_maximum_pages()) {
max_pages = static_cast<uint32_t>(memory_object->maximum_pages());
- if (kV8MaxWasmMemoryPages < max_pages) return -1;
+ if (FLAG_wasm_max_mem_pages < max_pages) return -1;
} else {
- max_pages = kV8MaxWasmMemoryPages;
+ max_pages = FLAG_wasm_max_mem_pages;
}
new_buffer = GrowMemoryBuffer(isolate, memory_buffer, pages, max_pages);
if (new_buffer.is_null()) return -1;
@@ -2493,6 +2309,8 @@ void wasm::GrowDispatchTables(Isolate* isolate,
Handle<FixedArray> dispatch_tables,
uint32_t old_size, uint32_t count) {
DCHECK_EQ(0, dispatch_tables->length() % 4);
+
+ Zone specialization_zone(isolate->allocator(), ZONE_NAME);
for (int i = 0; i < dispatch_tables->length(); i += 4) {
Handle<FixedArray> old_function_table(
FixedArray::cast(dispatch_tables->get(i + 2)));
@@ -2503,25 +2321,18 @@ void wasm::GrowDispatchTables(Isolate* isolate,
Handle<FixedArray> new_signature_table =
isolate->factory()->CopyFixedArrayAndGrow(old_signature_table, count);
- // Get code table for the instance
- Handle<WasmInstanceObject> instance(
- WasmInstanceObject::cast(dispatch_tables->get(i)));
- Handle<FixedArray> code_table(instance->compiled_module()->code_table());
-
- // Relocate size references
- RelocateTableSizeReferences(code_table, old_size, old_size + count);
-
- // Replace references of old tables with new tables.
- for (int j = 0; j < code_table->length(); ++j) {
- if (!code_table->get(j)->IsCode()) continue;
- Handle<Code> code = Handle<Code>(Code::cast(code_table->get(j)));
- ReplaceReferenceInCode(code, old_function_table, new_function_table);
- ReplaceReferenceInCode(code, old_signature_table, new_signature_table);
- }
-
// Update dispatch tables with new function/signature tables
dispatch_tables->set(i + 2, *new_function_table);
dispatch_tables->set(i + 3, *new_signature_table);
+
+ // Patch the code of the respective instance.
+ CodeSpecialization code_specialization(isolate, &specialization_zone);
+ code_specialization.PatchTableSize(old_size, old_size + count);
+ code_specialization.RelocateObject(old_function_table, new_function_table);
+ code_specialization.RelocateObject(old_signature_table,
+ new_signature_table);
+ code_specialization.ApplyToWholeInstance(
+ WasmInstanceObject::cast(dispatch_tables->get(i)));
}
}
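// dispatch_tables holds one 4-element tuple per linked table, hence the
// "% 4" DCHECK above: the owning instance at i, the function table at i + 2
// and the signature table at i + 3 (all used above); slot i + 1 is assumed
// to be the table index within that instance. Sketch of walking the tuples:
for (int i = 0; i < dispatch_tables->length(); i += 4) {
  WasmInstanceObject* instance =
      WasmInstanceObject::cast(dispatch_tables->get(i));
  FixedArray* function_table = FixedArray::cast(dispatch_tables->get(i + 2));
  FixedArray* signature_table = FixedArray::cast(dispatch_tables->get(i + 3));
  USE(instance);
  USE(function_table);
  USE(signature_table);
}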
@@ -2771,3 +2582,140 @@ Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
return array_object;
}
+
+bool wasm::SyncValidate(Isolate* isolate, ErrorThrower* thrower,
+ const ModuleWireBytes& bytes) {
+ if (bytes.start() == nullptr || bytes.length() == 0) return false;
+ ModuleResult result =
+ DecodeWasmModule(isolate, bytes.start(), bytes.end(), true, kWasmOrigin);
+ if (result.val) delete result.val;
+ return result.ok();
+}
+
+MaybeHandle<WasmModuleObject> wasm::SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) {
+ ModuleResult result = DecodeWasmModule(isolate, bytes.start(), bytes.end(),
+ false, kAsmJsOrigin);
+ if (result.failed()) {
+    // TODO(titzer): use Result<std::unique_ptr<const WasmModule>>?
+ if (result.val) delete result.val;
+ thrower->CompileFailed("Wasm decoding failed", result);
+ return {};
+ }
+
+ CompilationHelper helper(isolate, const_cast<WasmModule*>(result.val));
+ return helper.CompileToModuleObject(thrower, bytes, asm_js_script,
+ asm_js_offset_table_bytes);
+}
+
+MaybeHandle<WasmModuleObject> wasm::SyncCompile(Isolate* isolate,
+ ErrorThrower* thrower,
+ const ModuleWireBytes& bytes) {
+ if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
+ thrower->CompileError("Wasm code generation disallowed in this context");
+ return {};
+ }
+
+ ModuleResult result =
+ DecodeWasmModule(isolate, bytes.start(), bytes.end(), false, kWasmOrigin);
+ if (result.failed()) {
+ if (result.val) delete result.val;
+ thrower->CompileFailed("Wasm decoding failed", result);
+ return {};
+ }
+
+ CompilationHelper helper(isolate, const_cast<WasmModule*>(result.val));
+ return helper.CompileToModuleObject(thrower, bytes, Handle<Script>(),
+ Vector<const byte>());
+}
+
+MaybeHandle<WasmInstanceObject> wasm::SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory) {
+ InstantiationHelper helper(isolate, thrower, module_object, imports, memory);
+ return helper.Build();
+}
+
+void RejectPromise(Isolate* isolate, ErrorThrower* thrower,
+ Handle<JSPromise> promise) {
+ v8::Local<v8::Promise::Resolver> resolver =
+ v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
+ Handle<Context> context(isolate->context(), isolate);
+ resolver->Reject(v8::Utils::ToLocal(context),
+ v8::Utils::ToLocal(thrower->Reify()));
+}
+
+void ResolvePromise(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<Object> result) {
+ v8::Local<v8::Promise::Resolver> resolver =
+ v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
+ Handle<Context> context(isolate->context(), isolate);
+ resolver->Resolve(v8::Utils::ToLocal(context), v8::Utils::ToLocal(result));
+}
+
+void wasm::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes) {
+ ErrorThrower thrower(isolate, nullptr);
+ MaybeHandle<WasmModuleObject> module_object =
+ SyncCompile(isolate, &thrower, bytes);
+ if (thrower.error()) {
+ RejectPromise(isolate, &thrower, promise);
+ return;
+ }
+ ResolvePromise(isolate, promise, module_object.ToHandleChecked());
+}
+
+void wasm::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports) {
+ ErrorThrower thrower(isolate, nullptr);
+ MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
+ isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
+ if (thrower.error()) {
+ RejectPromise(isolate, &thrower, promise);
+ return;
+ }
+ ResolvePromise(isolate, promise, instance_object.ToHandleChecked());
+}
+
+void wasm::AsyncCompileAndInstantiate(Isolate* isolate,
+ Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes,
+ MaybeHandle<JSReceiver> imports) {
+ ErrorThrower thrower(isolate, nullptr);
+
+ // Compile the module.
+ MaybeHandle<WasmModuleObject> module_object =
+ SyncCompile(isolate, &thrower, bytes);
+ if (thrower.error()) {
+ RejectPromise(isolate, &thrower, promise);
+ return;
+ }
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+
+ // Instantiate the module.
+ MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
+ isolate, &thrower, module, imports, Handle<JSArrayBuffer>::null());
+ if (thrower.error()) {
+ RejectPromise(isolate, &thrower, promise);
+ return;
+ }
+
+ Handle<JSFunction> object_function =
+ Handle<JSFunction>(isolate->native_context()->object_function(), isolate);
+ Handle<JSObject> ret =
+ isolate->factory()->NewJSObject(object_function, TENURED);
+ Handle<String> module_property_name =
+ isolate->factory()->InternalizeUtf8String("module");
+ Handle<String> instance_property_name =
+ isolate->factory()->InternalizeUtf8String("instance");
+ JSObject::AddProperty(ret, module_property_name, module, NONE);
+ JSObject::AddProperty(ret, instance_property_name,
+ instance_object.ToHandleChecked(), NONE);
+
+ ResolvePromise(isolate, promise, ret);
+}
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 2f368a7391..1aaf9a4e96 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -11,9 +11,9 @@
#include "src/debug/debug-interface.h"
#include "src/globals.h"
#include "src/handles.h"
+#include "src/managed.h"
#include "src/parsing/preparse-data.h"
-#include "src/wasm/managed.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-opcodes.h"
@@ -28,44 +28,11 @@ class WasmMemoryObject;
namespace compiler {
class CallDescriptor;
-class WasmCompilationUnit;
}
namespace wasm {
class ErrorThrower;
-const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x01;
-
-const uint8_t kWasmFunctionTypeForm = 0x60;
-const uint8_t kWasmAnyFunctionTypeForm = 0x70;
-
-enum WasmSectionCode {
- kUnknownSectionCode = 0, // code for unknown sections
- kTypeSectionCode = 1, // Function signature declarations
- kImportSectionCode = 2, // Import declarations
- kFunctionSectionCode = 3, // Function declarations
- kTableSectionCode = 4, // Indirect function table and other tables
- kMemorySectionCode = 5, // Memory attributes
- kGlobalSectionCode = 6, // Global declarations
- kExportSectionCode = 7, // Exports
- kStartSectionCode = 8, // Start function declaration
- kElementSectionCode = 9, // Elements section
- kCodeSectionCode = 10, // Function code
- kDataSectionCode = 11, // Data segments
- kNameSectionCode = 12, // Name section (encoded as a string)
-};
-
-inline bool IsValidSectionCode(uint8_t byte) {
- return kTypeSectionCode <= byte && byte <= kDataSectionCode;
-}
-
-const char* SectionName(WasmSectionCode code);
-
-// Constants for fixed-size elements within a module.
-static const uint8_t kResizableMaximumFlag = 1;
-static const int32_t kInvalidFunctionIndex = -1;
-
enum WasmExternalKind {
kExternalFunction = 0,
kExternalTable = 1,
@@ -215,18 +182,6 @@ struct V8_EXPORT_PRIVATE WasmModule {
~WasmModule() {
if (owned_zone) delete owned_zone;
}
-
- // Creates a new instantiation of the module in the given isolate.
- static MaybeHandle<WasmInstanceObject> Instantiate(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> wasm_module, Handle<JSReceiver> ffi,
- Handle<JSArrayBuffer> memory = Handle<JSArrayBuffer>::null());
-
- MaybeHandle<WasmCompiledModule> CompileFunctions(
- Isolate* isolate, Handle<Managed<WasmModule>> module_wrapper,
- ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) const;
};
typedef Managed<WasmModule> WasmModuleWrapper;
@@ -259,21 +214,19 @@ struct WasmInstance {
// this struct is alive.
struct V8_EXPORT_PRIVATE ModuleWireBytes {
ModuleWireBytes(Vector<const byte> module_bytes)
- : module_bytes(module_bytes) {}
+ : module_bytes_(module_bytes) {}
ModuleWireBytes(const byte* start, const byte* end)
- : module_bytes(start, static_cast<int>(end - start)) {
+ : module_bytes_(start, static_cast<int>(end - start)) {
DCHECK_GE(kMaxInt, end - start);
}
- const Vector<const byte> module_bytes;
-
// Get a string stored in the module bytes representing a name.
WasmName GetName(uint32_t offset, uint32_t length) const {
if (length == 0) return {"<?>", 3}; // no name.
CHECK(BoundsCheck(offset, length));
DCHECK_GE(length, 0);
return Vector<const char>::cast(
- module_bytes.SubVector(offset, offset + length));
+ module_bytes_.SubVector(offset, offset + length));
}
// Get a string stored in the module bytes representing a function name.
@@ -287,7 +240,7 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
CHECK(BoundsCheck(offset, length));
DCHECK_GE(length, 0);
return Vector<const char>::cast(
- module_bytes.SubVector(offset, offset + length));
+ module_bytes_.SubVector(offset, offset + length));
}
// Get a string stored in the module bytes representing a function name.
@@ -297,9 +250,21 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
// Checks the given offset range is contained within the module bytes.
bool BoundsCheck(uint32_t offset, uint32_t length) const {
- uint32_t size = static_cast<uint32_t>(module_bytes.length());
+ uint32_t size = static_cast<uint32_t>(module_bytes_.length());
return offset <= size && length <= size - offset;
}
+
+ Vector<const byte> GetFunctionBytes(const WasmFunction* function) const {
+ return module_bytes_.SubVector(function->code_start_offset,
+ function->code_end_offset);
+ }
+
+ const byte* start() const { return module_bytes_.start(); }
+ const byte* end() const { return module_bytes_.end(); }
+ int length() const { return module_bytes_.length(); }
+
+ private:
+ const Vector<const byte> module_bytes_;
};
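// With module_bytes_ now private, callers go through the checked accessors.
// A usage sketch (module_start/module_end, the offsets, and the WasmModule*
// module are assumed to be in scope):
ModuleWireBytes wire(module_start, module_end);
if (wire.BoundsCheck(name_offset, name_length)) {
  WasmName name = wire.GetName(name_offset, name_length);
  // 'name' is a view into the wire bytes; no copy is made.
}
Vector<const byte> body = wire.GetFunctionBytes(&module->functions[0]);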
// Interface provided to the decoder/graph builder which contains only
@@ -347,6 +312,7 @@ struct V8_EXPORT_PRIVATE ModuleEnv {
return instance->function_code[index];
}
+ // TODO(titzer): move these into src/compiler/wasm-compiler.cc
static compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone,
FunctionSig* sig);
static compiler::CallDescriptor* GetI32WasmCallDescriptor(
@@ -356,19 +322,22 @@ struct V8_EXPORT_PRIVATE ModuleEnv {
};
// A ModuleEnv together with ModuleWireBytes.
-struct ModuleBytesEnv : public ModuleEnv, public ModuleWireBytes {
+struct ModuleBytesEnv {
ModuleBytesEnv(const WasmModule* module, WasmInstance* instance,
Vector<const byte> module_bytes)
- : ModuleEnv(module, instance), ModuleWireBytes(module_bytes) {}
+ : module_env(module, instance), wire_bytes(module_bytes) {}
ModuleBytesEnv(const WasmModule* module, WasmInstance* instance,
const ModuleWireBytes& wire_bytes)
- : ModuleEnv(module, instance), ModuleWireBytes(wire_bytes) {}
+ : module_env(module, instance), wire_bytes(wire_bytes) {}
+
+ ModuleEnv module_env;
+ ModuleWireBytes wire_bytes;
};
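// Design note: replacing the dual inheritance with two plain members means
// call sites now name the half they want instead of relying on implicit
// conversion to a base class. Sketch of the adjusted access pattern:
ModuleBytesEnv env(module, instance, wire_bytes);
const WasmFunction* func = &env.module_env.module->functions[0];
Vector<const byte> code = env.wire_bytes.GetFunctionBytes(func);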
// A helper for printing out the names of functions.
struct WasmFunctionName {
- WasmFunctionName(const WasmFunction* function, ModuleBytesEnv* module_env)
- : function_(function), name_(module_env->GetNameOrNull(function)) {}
+ WasmFunctionName(const WasmFunction* function, WasmName name)
+ : function_(function), name_(name) {}
const WasmFunction* function_;
WasmName name_;
@@ -410,11 +379,6 @@ V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
Isolate* isolate, Handle<WasmModuleObject> module, Handle<String> name,
ErrorThrower* thrower);
-V8_EXPORT_PRIVATE bool ValidateModuleBytes(Isolate* isolate, const byte* start,
- const byte* end,
- ErrorThrower* thrower,
- ModuleOrigin origin);
-
// Get the offset of the code of a function within a module.
int GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
int func_index);
@@ -443,21 +407,52 @@ int32_t GrowWebAssemblyMemory(Isolate* isolate,
int32_t GrowMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t pages);
+void DetachWebAssemblyMemoryBuffer(Isolate* isolate,
+ Handle<JSArrayBuffer> buffer);
+
void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
int index, Handle<JSFunction> js_function);
void GrowDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
uint32_t old_size, uint32_t count);
-namespace testing {
+//============================================================================
+//== Compilation and instantiation ===========================================
+//============================================================================
+V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate, ErrorThrower* thrower,
+ const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompile(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
+V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory);
+
+V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
+ Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports);
+
+V8_EXPORT_PRIVATE void AsyncCompileAndInstantiate(
+ Isolate* isolate, Handle<JSPromise> promise, const ModuleWireBytes& bytes,
+ MaybeHandle<JSReceiver> imports);
+
+namespace testing {
void ValidateInstancesChain(Isolate* isolate,
Handle<WasmModuleObject> module_obj,
int instance_count);
void ValidateModuleState(Isolate* isolate, Handle<WasmModuleObject> module_obj);
void ValidateOrphanedInstance(Isolate* isolate,
Handle<WasmInstanceObject> instance);
-
} // namespace testing
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 3f694c579f..d74bf0c97c 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -5,7 +5,9 @@
#include "src/wasm/wasm-objects.h"
#include "src/utils.h"
+#include "src/base/iterator.h"
#include "src/debug/debug-interface.h"
+#include "src/objects-inl.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-text.h"
@@ -37,6 +39,15 @@ using namespace v8::internal::wasm;
return !getter(field)->IsUndefined(GetIsolate()); \
}
+#define DEFINE_OPTIONAL_GETTER0(getter, Container, name, field, type) \
+ DEFINE_GETTER0(getter, Container, name, field, type) \
+ bool Container::has_##name() { \
+ return !getter(field)->IsUndefined(GetIsolate()); \
+ }
+
+#define DEFINE_GETTER0(getter, Container, name, field, type) \
+ type* Container::name() { return type::cast(getter(field)); }
+
#define DEFINE_OBJ_GETTER(Container, name, field, type) \
DEFINE_GETTER0(GetInternalField, Container, name, field, type)
#define DEFINE_OBJ_ACCESSORS(Container, name, field, type) \
@@ -51,6 +62,8 @@ using namespace v8::internal::wasm;
DEFINE_ACCESSORS0(get, set, Container, name, field, type)
#define DEFINE_OPTIONAL_ARR_ACCESSORS(Container, name, field, type) \
DEFINE_OPTIONAL_ACCESSORS0(get, set, Container, name, field, type)
+#define DEFINE_OPTIONAL_ARR_GETTER(Container, name, field, type) \
+ DEFINE_OPTIONAL_GETTER0(get, Container, name, field, type)
namespace {
@@ -78,6 +91,129 @@ int32_t SafeInt32(Object* value) {
return static_cast<int32_t>(num->value());
}
+// An iterator that returns the module itself first, then all modules linked
+// via next, then all linked via prev.
+class CompiledModulesIterator
+ : public std::iterator<std::input_iterator_tag,
+ Handle<WasmCompiledModule>> {
+ public:
+ CompiledModulesIterator(Isolate* isolate,
+ Handle<WasmCompiledModule> start_module, bool at_end)
+ : isolate_(isolate),
+ start_module_(start_module),
+ current_(at_end ? Handle<WasmCompiledModule>::null() : start_module) {}
+
+ Handle<WasmCompiledModule> operator*() const {
+ DCHECK(!current_.is_null());
+ return current_;
+ }
+
+ void operator++() { Advance(); }
+
+ bool operator!=(const CompiledModulesIterator& other) {
+ DCHECK(start_module_.is_identical_to(other.start_module_));
+ return !current_.is_identical_to(other.current_);
+ }
+
+ private:
+ void Advance() {
+ DCHECK(!current_.is_null());
+ if (!is_backwards_) {
+ if (current_->has_weak_next_instance()) {
+ WeakCell* weak_next = current_->ptr_to_weak_next_instance();
+ if (!weak_next->cleared()) {
+ current_ =
+ handle(WasmCompiledModule::cast(weak_next->value()), isolate_);
+ return;
+ }
+ }
+ // No more modules in next-links, now try the previous-links.
+ is_backwards_ = true;
+ current_ = start_module_;
+ }
+ if (current_->has_weak_prev_instance()) {
+ WeakCell* weak_prev = current_->ptr_to_weak_prev_instance();
+ if (!weak_prev->cleared()) {
+ current_ =
+ handle(WasmCompiledModule::cast(weak_prev->value()), isolate_);
+ return;
+ }
+ }
+ current_ = Handle<WasmCompiledModule>::null();
+ }
+
+ friend class CompiledModuleInstancesIterator;
+ Isolate* isolate_;
+ Handle<WasmCompiledModule> start_module_;
+ Handle<WasmCompiledModule> current_;
+ bool is_backwards_ = false;
+};
+
+// An iterator based on CompiledModulesIterator, but it returns all live
+// instances, not the WasmCompiledModule objects themselves.
+class CompiledModuleInstancesIterator
+ : public std::iterator<std::input_iterator_tag,
+ Handle<WasmInstanceObject>> {
+ public:
+ CompiledModuleInstancesIterator(Isolate* isolate,
+ Handle<WasmCompiledModule> start_module,
+ bool at_end)
+ : it(isolate, start_module, at_end) {
+ while (NeedToAdvance()) ++it;
+ }
+
+ Handle<WasmInstanceObject> operator*() {
+ return handle(
+ WasmInstanceObject::cast((*it)->weak_owning_instance()->value()),
+ it.isolate_);
+ }
+
+ void operator++() {
+ do {
+ ++it;
+ } while (NeedToAdvance());
+ }
+
+ bool operator!=(const CompiledModuleInstancesIterator& other) {
+ return it != other.it;
+ }
+
+ private:
+ bool NeedToAdvance() {
+ return !it.current_.is_null() &&
+ (!it.current_->has_weak_owning_instance() ||
+ it.current_->ptr_to_weak_owning_instance()->cleared());
+ }
+ CompiledModulesIterator it;
+};
+
+v8::base::iterator_range<CompiledModuleInstancesIterator>
+iterate_compiled_module_instance_chain(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ return {CompiledModuleInstancesIterator(isolate, compiled_module, false),
+ CompiledModuleInstancesIterator(isolate, compiled_module, true)};
+}
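// The iterator_range above makes the weak next/prev chain usable with a
// range-based for; cleared weak cells are skipped, so only live instances
// are yielded (see NeedToAdvance). Usage as it appears later in this diff:
for (Handle<WasmInstanceObject> instance :
     iterate_compiled_module_instance_chain(isolate, compiled_module)) {
  // 'instance' is guaranteed to be alive at this point.
}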
+
+#ifdef DEBUG
+bool IsBreakablePosition(Handle<WasmCompiledModule> compiled_module,
+ int func_index, int offset_in_func) {
+ DisallowHeapAllocation no_gc;
+ AccountingAllocator alloc;
+ Zone tmp(&alloc, ZONE_NAME);
+ BodyLocalDecls locals(&tmp);
+ const byte* module_start = compiled_module->module_bytes()->GetChars();
+ WasmFunction& func = compiled_module->module()->functions[func_index];
+ BytecodeIterator iterator(module_start + func.code_start_offset,
+ module_start + func.code_end_offset, &locals);
+ DCHECK_LT(0, locals.encoded_size);
+ for (uint32_t offset : iterator.offsets()) {
+ if (offset > static_cast<uint32_t>(offset_in_func)) break;
+ if (offset == static_cast<uint32_t>(offset_in_func)) return true;
+ }
+ return false;
+}
+#endif // DEBUG
+
} // namespace
Handle<WasmModuleObject> WasmModuleObject::New(
@@ -127,6 +263,8 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
Handle<JSFunction> table_ctor(
isolate->native_context()->wasm_table_constructor());
Handle<JSObject> table_obj = isolate->factory()->NewJSObject(table_ctor);
+ table_obj->SetInternalField(kWrapperTracerHeader, Smi::kZero);
+
*js_functions = isolate->factory()->NewFixedArray(initial);
Object* null = isolate->heap()->null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
@@ -204,6 +342,8 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
isolate->native_context()->wasm_memory_constructor());
Handle<JSObject> memory_obj =
isolate->factory()->NewJSObject(memory_ctor, TENURED);
+ memory_obj->SetInternalField(kWrapperTracerHeader, Smi::kZero);
+
memory_obj->SetInternalField(kArrayBuffer, *buffer);
Handle<Object> max = isolate->factory()->NewNumber(maximum);
memory_obj->SetInternalField(kMaximum, *max);
@@ -311,6 +451,8 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
isolate->native_context()->wasm_instance_constructor());
Handle<JSObject> instance_object =
isolate->factory()->NewJSObject(instance_cons, TENURED);
+ instance_object->SetInternalField(kWrapperTracerHeader, Smi::kZero);
+
Handle<Symbol> instance_sym(isolate->native_context()->wasm_instance_sym());
Object::SetProperty(instance_object, instance_sym, instance_object, STRICT)
.Check();
@@ -363,6 +505,8 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
shared->set_internal_formal_parameter_count(arity);
Handle<JSFunction> function = isolate->factory()->NewFunction(
isolate->wasm_function_map(), name, export_wrapper);
+ function->SetInternalField(kWrapperTracerHeader, Smi::kZero);
+
function->set_shared(*shared);
function->SetInternalField(kInstance, *instance);
@@ -383,6 +527,9 @@ bool WasmSharedModuleData::IsWasmSharedModuleData(Object* object) {
if (!arr->get(kAsmJsOffsetTable)->IsUndefined(isolate) &&
!arr->get(kAsmJsOffsetTable)->IsByteArray())
return false;
+ if (!arr->get(kBreakPointInfos)->IsUndefined(isolate) &&
+ !arr->get(kBreakPointInfos)->IsFixedArray())
+ return false;
return true;
}
@@ -392,7 +539,13 @@ WasmSharedModuleData* WasmSharedModuleData::cast(Object* object) {
}
wasm::WasmModule* WasmSharedModuleData::module() {
- return reinterpret_cast<WasmModuleWrapper*>(get(kModuleWrapper))->get();
+  // We populate the kModuleWrapper field with a Foreign holding the
+  // address of a pointer to a WasmModule. This lets us handle both the
+  // case where the WasmModule's lifetime is managed through a
+  // Managed<WasmModule> object and the case where it is managed by the
+  // embedder. CcTests fall into the latter case.
+ return *(reinterpret_cast<wasm::WasmModule**>(
+ Foreign::cast(get(kModuleWrapper))->foreign_address()));
}
DEFINE_OPTIONAL_ARR_ACCESSORS(WasmSharedModuleData, module_bytes, kModuleBytes,
@@ -400,6 +553,8 @@ DEFINE_OPTIONAL_ARR_ACCESSORS(WasmSharedModuleData, module_bytes, kModuleBytes,
DEFINE_ARR_GETTER(WasmSharedModuleData, script, kScript, Script);
DEFINE_OPTIONAL_ARR_ACCESSORS(WasmSharedModuleData, asm_js_offset_table,
kAsmJsOffsetTable, ByteArray);
+DEFINE_OPTIONAL_ARR_GETTER(WasmSharedModuleData, breakpoint_infos,
+ kBreakPointInfos, FixedArray);
Handle<WasmSharedModuleData> WasmSharedModuleData::New(
Isolate* isolate, Handle<Foreign> module_wrapper,
@@ -407,7 +562,7 @@ Handle<WasmSharedModuleData> WasmSharedModuleData::New(
Handle<ByteArray> asm_js_offset_table) {
Handle<FixedArray> arr =
isolate->factory()->NewFixedArray(kFieldCount, TENURED);
-
+ arr->set(kWrapperTracerHeader, Smi::kZero);
arr->set(kModuleWrapper, *module_wrapper);
if (!module_bytes.is_null()) {
arr->set(kModuleBytes, *module_bytes);
@@ -430,9 +585,19 @@ bool WasmSharedModuleData::is_asm_js() {
return asm_js;
}
-void WasmSharedModuleData::RecreateModuleWrapper(
+void WasmSharedModuleData::ReinitializeAfterDeserialization(
Isolate* isolate, Handle<WasmSharedModuleData> shared) {
DCHECK(shared->get(kModuleWrapper)->IsUndefined(isolate));
+#ifdef DEBUG
+ // No BreakpointInfo objects should survive deserialization.
+ if (shared->has_breakpoint_infos()) {
+ for (int i = 0, e = shared->breakpoint_infos()->length(); i < e; ++i) {
+ DCHECK(shared->breakpoint_infos()->get(i)->IsUndefined(isolate));
+ }
+ }
+#endif
+
+ shared->set(kBreakPointInfos, isolate->heap()->undefined_value());
WasmModule* module = nullptr;
{
@@ -459,6 +624,126 @@ void WasmSharedModuleData::RecreateModuleWrapper(
DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
}
+namespace {
+
+int GetBreakpointPos(Isolate* isolate, Object* break_point_info_or_undef) {
+ if (break_point_info_or_undef->IsUndefined(isolate)) return kMaxInt;
+ return BreakPointInfo::cast(break_point_info_or_undef)->source_position();
+}
+
+int FindBreakpointInfoInsertPos(Isolate* isolate,
+ Handle<FixedArray> breakpoint_infos,
+ int position) {
+ // Find insert location via binary search, taking care of undefined values on
+ // the right. Position is always greater than zero.
+ DCHECK_LT(0, position);
+
+ int left = 0; // inclusive
+ int right = breakpoint_infos->length(); // exclusive
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ Object* mid_obj = breakpoint_infos->get(mid);
+ if (GetBreakpointPos(isolate, mid_obj) <= position) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+
+ int left_pos = GetBreakpointPos(isolate, breakpoint_infos->get(left));
+ return left_pos < position ? left + 1 : left;
+}
+
+} // namespace
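// A standalone sketch of FindBreakpointInfoInsertPos: a lower-bound style
// binary search over a fixed-capacity sorted array whose unused tail holds
// a +infinity sentinel (INT_MAX plays the role of undefined). Requires
// n >= 1, mirroring the non-empty array invariant above.
#include <climits>
int FindInsertPos(const int* positions, int n, int position) {
  int left = 0;   // inclusive
  int right = n;  // exclusive
  while (right - left > 1) {
    int mid = left + (right - left) / 2;
    if (positions[mid] <= position) {
      left = mid;
    } else {
      right = mid;
    }
  }
  return positions[left] < position ? left + 1 : left;
}
// Example: positions = {3, 7, INT_MAX, INT_MAX}, n = 4:
//   FindInsertPos(positions, 4, 7)  == 1  (existing entry; caller merges)
//   FindInsertPos(positions, 4, 10) == 2  (first free slot)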
+
+void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
+ int position,
+ Handle<Object> break_point_object) {
+ Isolate* isolate = shared->GetIsolate();
+ Handle<FixedArray> breakpoint_infos;
+ if (shared->has_breakpoint_infos()) {
+ breakpoint_infos = handle(shared->breakpoint_infos(), isolate);
+ } else {
+ breakpoint_infos = isolate->factory()->NewFixedArray(4, TENURED);
+ shared->set(kBreakPointInfos, *breakpoint_infos);
+ }
+
+ int insert_pos =
+ FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
+
+ // If a BreakPointInfo object already exists for this position, add the new
+ // breakpoint object and return.
+ if (insert_pos < breakpoint_infos->length() &&
+ GetBreakpointPos(isolate, breakpoint_infos->get(insert_pos)) ==
+ position) {
+ Handle<BreakPointInfo> old_info(
+ BreakPointInfo::cast(breakpoint_infos->get(insert_pos)), isolate);
+ BreakPointInfo::SetBreakPoint(old_info, break_point_object);
+ return;
+ }
+
+ // Enlarge break positions array if necessary.
+ bool need_realloc = !breakpoint_infos->get(breakpoint_infos->length() - 1)
+ ->IsUndefined(isolate);
+ Handle<FixedArray> new_breakpoint_infos = breakpoint_infos;
+ if (need_realloc) {
+ new_breakpoint_infos = isolate->factory()->NewFixedArray(
+ 2 * breakpoint_infos->length(), TENURED);
+ shared->set(kBreakPointInfos, *new_breakpoint_infos);
+ // Copy over the entries [0, insert_pos).
+ for (int i = 0; i < insert_pos; ++i)
+ new_breakpoint_infos->set(i, breakpoint_infos->get(i));
+ }
+
+  // Move elements [insert_pos, ...] up by one. Iterate backwards so that
+  // an in-place shift (no realloc) does not overwrite entries before they
+  // are copied.
+  for (int i = breakpoint_infos->length() - 1; i >= insert_pos; --i) {
+    Object* entry = breakpoint_infos->get(i);
+    if (entry->IsUndefined(isolate)) continue;
+    new_breakpoint_infos->set(i + 1, entry);
+  }
+
+ // Generate new BreakpointInfo.
+ Handle<BreakPointInfo> breakpoint_info =
+ isolate->factory()->NewBreakPointInfo(position);
+ BreakPointInfo::SetBreakPoint(breakpoint_info, break_point_object);
+
+ // Now insert new position at insert_pos.
+ new_breakpoint_infos->set(insert_pos, *breakpoint_info);
+}
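// Worked example of the insertion above (source positions shown, u =
// undefined):
//   infos = [10, 25, u, u], AddBreakpoint(17):
//     insert_pos = 1; the last slot is undefined, so no realloc. The
//     backward shift moves 25 to index 2, then the new BreakPointInfo(17)
//     lands at index 1: [10, 17, 25, u].
//   infos = [10, 17, 25, 40], AddBreakpoint(30):
//     insert_pos = 3; the last slot is occupied, so the array grows to
//     length 8, [10, 17, 25] are copied, 40 shifts to index 4, and 30 is
//     stored at index 3: [10, 17, 25, 30, 40, u, u, u].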
+
+void WasmSharedModuleData::SetBreakpointsOnNewInstance(
+ Handle<WasmSharedModuleData> shared, Handle<WasmInstanceObject> instance) {
+ if (!shared->has_breakpoint_infos()) return;
+ Isolate* isolate = shared->GetIsolate();
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+
+ Handle<FixedArray> breakpoint_infos(shared->breakpoint_infos(), isolate);
+ // If the array exists, it should not be empty.
+ DCHECK_LT(0, breakpoint_infos->length());
+
+ for (int i = 0, e = breakpoint_infos->length(); i < e; ++i) {
+ Handle<Object> obj(breakpoint_infos->get(i), isolate);
+ if (obj->IsUndefined(isolate)) {
+ for (; i < e; ++i) {
+ DCHECK(breakpoint_infos->get(i)->IsUndefined(isolate));
+ }
+ break;
+ }
+ Handle<BreakPointInfo> breakpoint_info = Handle<BreakPointInfo>::cast(obj);
+ int position = breakpoint_info->source_position();
+
+ // Find the function for this breakpoint, and set the breakpoint.
+ int func_index = compiled_module->GetContainingFunction(position);
+ DCHECK_LE(0, func_index);
+ WasmFunction& func = compiled_module->module()->functions[func_index];
+ int offset_in_func = position - func.code_start_offset;
+ WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
+ }
+}
+
Handle<WasmCompiledModule> WasmCompiledModule::New(
Isolate* isolate, Handle<WasmSharedModuleData> shared) {
Handle<FixedArray> ret =
@@ -536,7 +821,7 @@ void WasmCompiledModule::PrintInstancesChain() {
#endif
}
-void WasmCompiledModule::RecreateModuleWrapper(
+void WasmCompiledModule::ReinitializeAfterDeserialization(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
// This method must only be called immediately after deserialization.
// At this point, no module wrapper exists, so the shared module data is
@@ -545,7 +830,7 @@ void WasmCompiledModule::RecreateModuleWrapper(
static_cast<WasmSharedModuleData*>(compiled_module->get(kID_shared)),
isolate);
DCHECK(!WasmSharedModuleData::IsWasmSharedModuleData(*shared));
- WasmSharedModuleData::RecreateModuleWrapper(isolate, shared);
+ WasmSharedModuleData::ReinitializeAfterDeserialization(isolate, shared);
DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
}
@@ -833,13 +1118,66 @@ bool WasmCompiledModule::GetPossibleBreakpoints(
return true;
}
+bool WasmCompiledModule::SetBreakPoint(
+ Handle<WasmCompiledModule> compiled_module, int* position,
+ Handle<Object> break_point_object) {
+ Isolate* isolate = compiled_module->GetIsolate();
+
+ // Find the function for this breakpoint.
+ int func_index = compiled_module->GetContainingFunction(*position);
+ if (func_index < 0) return false;
+ WasmFunction& func = compiled_module->module()->functions[func_index];
+ int offset_in_func = *position - func.code_start_offset;
+
+ // According to the current design, we should only be called with valid
+ // breakable positions.
+ DCHECK(IsBreakablePosition(compiled_module, func_index, offset_in_func));
+
+ // Insert new break point into break_positions of shared module data.
+ WasmSharedModuleData::AddBreakpoint(compiled_module->shared(), *position,
+ break_point_object);
+
+ // Iterate over all instances of this module and tell them to set this new
+ // breakpoint.
+ for (Handle<WasmInstanceObject> instance :
+ iterate_compiled_module_instance_chain(isolate, compiled_module)) {
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
+ }
+
+ return true;
+}
+
+MaybeHandle<FixedArray> WasmCompiledModule::CheckBreakPoints(int position) {
+ Isolate* isolate = GetIsolate();
+ if (!shared()->has_breakpoint_infos()) return {};
+
+ Handle<FixedArray> breakpoint_infos(shared()->breakpoint_infos(), isolate);
+ int insert_pos =
+ FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
+ if (insert_pos >= breakpoint_infos->length()) return {};
+
+ Handle<Object> maybe_breakpoint_info(breakpoint_infos->get(insert_pos),
+ isolate);
+ if (maybe_breakpoint_info->IsUndefined(isolate)) return {};
+ Handle<BreakPointInfo> breakpoint_info =
+ Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
+ if (breakpoint_info->source_position() != position) return {};
+
+ Handle<Object> breakpoint_objects(breakpoint_info->break_point_objects(),
+ isolate);
+ return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
+}
+
Handle<WasmInstanceWrapper> WasmInstanceWrapper::New(
Isolate* isolate, Handle<WasmInstanceObject> instance) {
Handle<FixedArray> array =
isolate->factory()->NewFixedArray(kWrapperPropertyCount, TENURED);
Handle<WasmInstanceWrapper> instance_wrapper(
reinterpret_cast<WasmInstanceWrapper*>(*array), isolate);
- instance_wrapper->set_instance_object(instance, isolate);
+ Handle<WeakCell> cell = isolate->factory()->NewWeakCell(instance);
+ instance_wrapper->set(kWrapperInstanceObject, *cell);
return instance_wrapper;
}
@@ -857,9 +1195,3 @@ bool WasmInstanceWrapper::IsWasmInstanceWrapper(Object* obj) {
return false;
return true;
}
-
-void WasmInstanceWrapper::set_instance_object(Handle<JSObject> instance,
- Isolate* isolate) {
- Handle<WeakCell> cell = isolate->factory()->NewWeakCell(instance);
- set(kWrapperInstanceObject, *cell);
-}
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index c478fe0419..b198cf2755 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -5,15 +5,16 @@
#ifndef V8_WASM_OBJECTS_H_
#define V8_WASM_OBJECTS_H_
+#include "src/debug/debug.h"
#include "src/debug/interface-types.h"
-#include "src/objects-inl.h"
+#include "src/objects.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/wasm/managed.h"
#include "src/wasm/wasm-limits.h"
namespace v8 {
namespace internal {
namespace wasm {
+class InterpretedFrame;
struct WasmModule;
}
@@ -36,9 +37,14 @@ class WasmInstanceWrapper;
bool has_##name(); \
DECLARE_ACCESSORS(name, type)
+#define DECLARE_OPTIONAL_GETTER(name, type) \
+ bool has_##name(); \
+ DECLARE_GETTER(name, type)
+
// Representation of a WebAssembly.Module JavaScript-level object.
class WasmModuleObject : public JSObject {
public:
+ // If a second field is added, we need a kWrapperTracerHeader field as well.
// TODO(titzer): add the brand as an internal field instead of a property.
enum Fields { kCompiledModule, kFieldCount };
@@ -53,8 +59,15 @@ class WasmModuleObject : public JSObject {
// Representation of a WebAssembly.Table JavaScript-level object.
class WasmTableObject : public JSObject {
public:
+ // The 0-th field is used by the Blink Wrapper Tracer.
// TODO(titzer): add the brand as an internal field instead of a property.
- enum Fields { kFunctions, kMaximum, kDispatchTables, kFieldCount };
+ enum Fields {
+ kWrapperTracerHeader,
+ kFunctions,
+ kMaximum,
+ kDispatchTables,
+ kFieldCount
+ };
DECLARE_CASTS(WasmTableObject);
DECLARE_ACCESSORS(functions, FixedArray);
@@ -78,8 +91,15 @@ class WasmTableObject : public JSObject {
// Representation of a WebAssembly.Memory JavaScript-level object.
class WasmMemoryObject : public JSObject {
public:
+ // The 0-th field is used by the Blink Wrapper Tracer.
// TODO(titzer): add the brand as an internal field instead of a property.
- enum Fields : uint8_t { kArrayBuffer, kMaximum, kInstancesLink, kFieldCount };
+ enum Fields : uint8_t {
+ kWrapperTracerHeader,
+ kArrayBuffer,
+ kMaximum,
+ kInstancesLink,
+ kFieldCount
+ };
DECLARE_CASTS(WasmMemoryObject);
DECLARE_ACCESSORS(buffer, JSArrayBuffer);
@@ -102,8 +122,10 @@ class WasmMemoryObject : public JSObject {
// Representation of a WebAssembly.Instance JavaScript-level object.
class WasmInstanceObject : public JSObject {
public:
+ // The 0-th field is used by the Blink Wrapper Tracer.
// TODO(titzer): add the brand as an internal field instead of a property.
enum Fields {
+ kWrapperTracerHeader,
kCompiledModule,
kMemoryObject,
kMemoryArrayBuffer,
@@ -137,7 +159,8 @@ class WasmInstanceObject : public JSObject {
// Representation of an exported WASM function.
class WasmExportedFunction : public JSFunction {
public:
- enum Fields { kInstance, kIndex, kFieldCount };
+ // The 0-th field is used by the Blink Wrapper Tracer.
+ enum Fields { kWrapperTracerHeader, kInstance, kIndex, kFieldCount };
DECLARE_CASTS(WasmExportedFunction);
@@ -153,11 +176,14 @@ class WasmExportedFunction : public JSFunction {
// Information shared by all WasmCompiledModule objects for the same module.
class WasmSharedModuleData : public FixedArray {
+ // The 0-th field is used by the Blink Wrapper Tracer.
enum Fields {
+ kWrapperTracerHeader,
kModuleWrapper,
kModuleBytes,
kScript,
kAsmJsOffsetTable,
+ kBreakPointInfos,
kFieldCount
};
@@ -168,6 +194,7 @@ class WasmSharedModuleData : public FixedArray {
DECLARE_OPTIONAL_ACCESSORS(module_bytes, SeqOneByteString);
DECLARE_GETTER(script, Script);
DECLARE_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray);
+ DECLARE_OPTIONAL_GETTER(breakpoint_infos, FixedArray);
static Handle<WasmSharedModuleData> New(
Isolate* isolate, Handle<Foreign> module_wrapper,
@@ -177,8 +204,14 @@ class WasmSharedModuleData : public FixedArray {
// Check whether this module was generated from asm.js source.
bool is_asm_js();
- // Recreate the ModuleWrapper from the module bytes after deserialization.
- static void RecreateModuleWrapper(Isolate*, Handle<WasmSharedModuleData>);
+ static void ReinitializeAfterDeserialization(Isolate*,
+ Handle<WasmSharedModuleData>);
+
+ static void AddBreakpoint(Handle<WasmSharedModuleData>, int position,
+ Handle<Object> break_point_object);
+
+ static void SetBreakpointsOnNewInstance(Handle<WasmSharedModuleData>,
+ Handle<WasmInstanceObject>);
};
class WasmCompiledModule : public FixedArray {
@@ -311,9 +344,8 @@ class WasmCompiledModule : public FixedArray {
void PrintInstancesChain();
- // Recreate the ModuleWrapper from the module bytes after deserialization.
- static void RecreateModuleWrapper(Isolate* isolate,
- Handle<WasmCompiledModule> compiled_module);
+ static void ReinitializeAfterDeserialization(Isolate*,
+ Handle<WasmCompiledModule>);
// Get the function name of the function identified by the given index.
// Returns a null handle if the function is unnamed or the name is not a valid
@@ -375,6 +407,19 @@ class WasmCompiledModule : public FixedArray {
const debug::Location& end,
std::vector<debug::Location>* locations);
+ // Set a breakpoint on the given byte position inside the given module.
+ // This will affect all live and future instances of the module.
+ // The passed position might be modified to point to the next breakable
+ // location inside the same function.
+ // If it points outside a function, or beyond the last breakable location,
+ // this function returns false and does not set any breakpoint.
+ static bool SetBreakPoint(Handle<WasmCompiledModule>, int* position,
+ Handle<Object> break_point_object);
+
+ // Return an empty handle if no breakpoint is hit at that location, or a
+ // FixedArray with all hit breakpoint objects.
+ MaybeHandle<FixedArray> CheckBreakPoints(int position);
+
private:
void InitId();
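
Note on the breakpoint API added above: SetBreakPoint and CheckBreakPoints make breakpoints a property of the compiled module rather than of any single instance, so one call affects every live and future instantiation. A minimal usage sketch, assuming valid `compiled_module` and `break_point_object` handles obtained elsewhere (illustrative only, not V8's actual debugger code):

    // Illustrative only; assumes the handles are set up by the caller.
    int position = 142;  // requested byte offset inside the module
    if (WasmCompiledModule::SetBreakPoint(compiled_module, &position,
                                          break_point_object)) {
      // position may now point at the next breakable location in the
      // same function (it is adjusted in place).
      MaybeHandle<FixedArray> hit =
          compiled_module->CheckBreakPoints(position);
      if (!hit.is_null()) {
        // FixedArray of every breakpoint object registered at position.
      }
    }
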
@@ -383,7 +428,9 @@ class WasmCompiledModule : public FixedArray {
class WasmDebugInfo : public FixedArray {
public:
+ // The 0-th field is used by the Blink Wrapper Tracer.
enum Fields {
+ kWrapperTracerHeader,
kInstance,
kInterpreterHandle,
kInterpretedFunctions,
@@ -395,10 +442,29 @@ class WasmDebugInfo : public FixedArray {
static bool IsDebugInfo(Object*);
static WasmDebugInfo* cast(Object*);
+ // Set a breakpoint in the given function at the given byte offset within that
+ // function. This will redirect all future calls to this function to the
+ // interpreter and will always pause at the given offset.
static void SetBreakpoint(Handle<WasmDebugInfo>, int func_index, int offset);
- static void RunInterpreter(Handle<WasmDebugInfo>, int func_index,
- uint8_t* arg_buffer);
+ // Make a function always execute in the interpreter without setting a
+ // breakpoint.
+ static void RedirectToInterpreter(Handle<WasmDebugInfo>, int func_index);
+
+ void PrepareStep(StepAction);
+
+ void RunInterpreter(int func_index, uint8_t* arg_buffer);
+
+ // Get the stack of the wasm interpreter as pairs of <function index, byte
+ // offset>. The list is ordered bottom-to-top, i.e. caller before callee.
+ std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
+ Address frame_pointer);
+
+ std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
+ Address frame_pointer, int idx);
+
+ // Returns the number of calls / function frames executed in the interpreter.
+ uint64_t NumInterpretedCalls();
DECLARE_GETTER(wasm_instance, WasmInstanceObject);
};
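
GetInterpretedStack returns the interpreter's frames bottom-to-top (caller before callee), so printing a conventional top-of-stack-first trace means iterating in reverse. A minimal sketch, assuming a valid `debug_info` pointer and the frame pointer of the topmost interpreter entry frame:

    // Illustrative only; debug_info and fp are assumed to exist.
    WasmDebugInfo* debug_info = /* obtained elsewhere */;
    Address fp = /* frame pointer of the interpreter entry frame */;
    auto frames = debug_info->GetInterpretedStack(fp);
    // Walk callee-first (top of stack first) for a conventional backtrace.
    for (auto it = frames.rbegin(); it != frames.rend(); ++it) {
      uint32_t func_index = it->first;
      int byte_offset = it->second;
      // e.g. print "<func_index>@<byte_offset>"
    }
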
@@ -424,7 +490,6 @@ class WasmInstanceWrapper : public FixedArray {
bool has_previous() {
return IsWasmInstanceWrapper(get(kPreviousInstanceWrapper));
}
- void set_instance_object(Handle<JSObject> instance, Isolate* isolate);
void set_next_wrapper(Object* obj) {
DCHECK(IsWasmInstanceWrapper(obj));
set(kNextInstanceWrapper, obj);
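
Every one of the Fields enums changed above now reserves index 0 (kWrapperTracerHeader) because Blink's wrapper tracer claims the first embedder field of a wrapped object. Any code indexing these objects by field therefore sees all payload slots shifted up by one, which the enums encode automatically; an illustrative check of the resulting layout:

    // Illustration: after this change the payload starts at index 1.
    static_assert(WasmTableObject::kWrapperTracerHeader == 0,
                  "tracer header must be the first field");
    static_assert(WasmTableObject::kFunctions == 1,
                  "payload fields shifted up by one");
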
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 2a00a73cbd..ec1cbd59b2 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -13,30 +13,221 @@ namespace wasm {
typedef Signature<ValueType> FunctionSig;
+#define CASE_OP(name, str) \
+ case kExpr##name: \
+ return str;
+#define CASE_I32_OP(name, str) CASE_OP(I32##name, "i32." str)
+#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
+#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
+#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
+#define CASE_S128_OP(name, str) CASE_OP(S128##name, "s128." str)
+#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
+#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
+#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
+#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
+#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
+#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
+#define CASE_S8x16_OP(name, str) CASE_OP(S8x16##name, "s8x16." str)
+#define CASE_INT_OP(name, str) CASE_I32_OP(name, str) CASE_I64_OP(name, str)
+#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
+#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
+#define CASE_SIMD_OP(name, str) \
+ CASE_F32x4_OP(name, str) CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
+ CASE_I8x16_OP(name, str)
+#define CASE_SIMDI_OP(name, str) \
+ CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
+#define CASE_SIGN_OP(TYPE, name, str) \
+ CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
+#define CASE_ALL_SIGN_OP(name, str) \
+ CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
+#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
+ CASE_##RES##_OP(U##name##SRC, str "_u/" src_suffix) \
+ CASE_##RES##_OP(S##name##SRC, str "_s/" src_suffix)
+#define CASE_L32_OP(name, str) \
+ CASE_SIGN_OP(I32, name##8, str "8") \
+ CASE_SIGN_OP(I32, name##16, str "16") \
+ CASE_I32_OP(name, str "32")
+
const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
switch (opcode) {
-#define DECLARE_NAME_CASE(name, opcode, sig) \
- case kExpr##name: \
- return "Expr" #name;
- FOREACH_OPCODE(DECLARE_NAME_CASE)
-#undef DECLARE_NAME_CASE
- default:
- break;
- }
- return "Unknown";
-}
+ // clang-format off
-const char* WasmOpcodes::ShortOpcodeName(WasmOpcode opcode) {
- switch (opcode) {
-#define DECLARE_NAME_CASE(name, opcode, sig) \
- case kExpr##name: \
- return #name;
- FOREACH_OPCODE(DECLARE_NAME_CASE)
-#undef DECLARE_NAME_CASE
- default:
- break;
+ // Standard opcodes
+ CASE_INT_OP(Eqz, "eqz")
+ CASE_ALL_OP(Eq, "eq")
+ CASE_ALL_OP(Ne, "ne")
+ CASE_ALL_OP(Add, "add")
+ CASE_ALL_OP(Sub, "sub")
+ CASE_ALL_OP(Mul, "mul")
+ CASE_ALL_SIGN_OP(Lt, "lt")
+ CASE_ALL_SIGN_OP(Gt, "gt")
+ CASE_ALL_SIGN_OP(Le, "le")
+ CASE_ALL_SIGN_OP(Ge, "ge")
+ CASE_INT_OP(Clz, "clz")
+ CASE_INT_OP(Ctz, "ctz")
+ CASE_INT_OP(Popcnt, "popcnt")
+ CASE_ALL_SIGN_OP(Div, "div")
+ CASE_SIGN_OP(INT, Rem, "rem")
+ CASE_INT_OP(And, "and")
+ CASE_INT_OP(Ior, "or")
+ CASE_INT_OP(Xor, "xor")
+ CASE_INT_OP(Shl, "shl")
+ CASE_SIGN_OP(INT, Shr, "shr")
+ CASE_INT_OP(Rol, "rol")
+ CASE_INT_OP(Ror, "ror")
+ CASE_FLOAT_OP(Abs, "abs")
+ CASE_FLOAT_OP(Neg, "neg")
+ CASE_FLOAT_OP(Ceil, "ceil")
+ CASE_FLOAT_OP(Floor, "floor")
+ CASE_FLOAT_OP(Trunc, "trunc")
+ CASE_FLOAT_OP(NearestInt, "nearest")
+ CASE_FLOAT_OP(Sqrt, "sqrt")
+ CASE_FLOAT_OP(Min, "min")
+ CASE_FLOAT_OP(Max, "max")
+ CASE_FLOAT_OP(CopySign, "copysign")
+ CASE_I32_OP(ConvertI64, "wrap/i64")
+ CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
+ CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
+ CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
+ CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
+ CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
+ CASE_F32_OP(ConvertF64, "demote/f64")
+ CASE_CONVERT_OP(Convert, F64, I32, "i32", "convert")
+ CASE_CONVERT_OP(Convert, F64, I64, "i64", "convert")
+ CASE_F64_OP(ConvertF32, "promote/f32")
+ CASE_I32_OP(ReinterpretF32, "reinterpret/f32")
+ CASE_I64_OP(ReinterpretF64, "reinterpret/f64")
+ CASE_F32_OP(ReinterpretI32, "reinterpret/i32")
+ CASE_F64_OP(ReinterpretI64, "reinterpret/i64")
+ CASE_OP(Unreachable, "unreachable")
+ CASE_OP(Nop, "nop")
+ CASE_OP(Block, "block")
+ CASE_OP(Loop, "loop")
+ CASE_OP(If, "if")
+ CASE_OP(Else, "else")
+ CASE_OP(End, "end")
+ CASE_OP(Br, "br")
+ CASE_OP(BrIf, "br_if")
+ CASE_OP(BrTable, "br_table")
+ CASE_OP(Return, "return")
+ CASE_OP(CallFunction, "call")
+ CASE_OP(CallIndirect, "call_indirect")
+ CASE_OP(Drop, "drop")
+ CASE_OP(Select, "select")
+ CASE_OP(GetLocal, "get_local")
+ CASE_OP(SetLocal, "set_local")
+ CASE_OP(TeeLocal, "tee_local")
+ CASE_OP(GetGlobal, "get_global")
+ CASE_OP(SetGlobal, "set_global")
+ CASE_ALL_OP(Const, "const")
+ CASE_OP(MemorySize, "current_memory")
+ CASE_OP(GrowMemory, "grow_memory")
+ CASE_ALL_OP(LoadMem, "load")
+ CASE_SIGN_OP(INT, LoadMem8, "load8")
+ CASE_SIGN_OP(INT, LoadMem16, "load16")
+ CASE_SIGN_OP(I64, LoadMem32, "load32")
+ CASE_ALL_OP(StoreMem, "store")
+ CASE_INT_OP(StoreMem8, "store8")
+ CASE_INT_OP(StoreMem16, "store16")
+ CASE_I64_OP(StoreMem32, "store32")
+
+ // Non-standard opcodes.
+ CASE_OP(Try, "try")
+ CASE_OP(Throw, "throw")
+ CASE_OP(Catch, "catch")
+
+ // asm.js-only opcodes.
+ CASE_F64_OP(Acos, "acos")
+ CASE_F64_OP(Asin, "asin")
+ CASE_F64_OP(Atan, "atan")
+ CASE_F64_OP(Cos, "cos")
+ CASE_F64_OP(Sin, "sin")
+ CASE_F64_OP(Tan, "tan")
+ CASE_F64_OP(Exp, "exp")
+ CASE_F64_OP(Log, "log")
+ CASE_F64_OP(Atan2, "atan2")
+ CASE_F64_OP(Pow, "pow")
+ CASE_F64_OP(Mod, "mod")
+ CASE_F32_OP(AsmjsLoadMem, "asmjs_load")
+ CASE_F64_OP(AsmjsLoadMem, "asmjs_load")
+ CASE_L32_OP(AsmjsLoadMem, "asmjs_load")
+ CASE_I32_OP(AsmjsStoreMem, "asmjs_store")
+ CASE_F32_OP(AsmjsStoreMem, "asmjs_store")
+ CASE_F64_OP(AsmjsStoreMem, "asmjs_store")
+ CASE_I32_OP(AsmjsStoreMem8, "asmjs_store8")
+ CASE_I32_OP(AsmjsStoreMem16, "asmjs_store16")
+ CASE_SIGN_OP(I32, AsmjsDiv, "asmjs_div")
+ CASE_SIGN_OP(I32, AsmjsRem, "asmjs_rem")
+ CASE_I32_OP(AsmjsSConvertF32, "asmjs_convert_s/f32")
+ CASE_I32_OP(AsmjsUConvertF32, "asmjs_convert_u/f32")
+ CASE_I32_OP(AsmjsSConvertF64, "asmjs_convert_s/f64")
+ CASE_I32_OP(AsmjsUConvertF64, "asmjs_convert_u/f64")
+
+ // SIMD opcodes.
+ CASE_SIMD_OP(Splat, "splat")
+ CASE_SIMD_OP(Neg, "neg")
+ CASE_SIMD_OP(Eq, "eq")
+ CASE_SIMD_OP(Ne, "ne")
+ CASE_SIMD_OP(Add, "add")
+ CASE_SIMD_OP(Sub, "sub")
+ CASE_SIMD_OP(Mul, "mul")
+ CASE_F32x4_OP(Abs, "abs")
+ CASE_F32x4_OP(Sqrt, "sqrt")
+ CASE_F32x4_OP(Div, "div")
+ CASE_F32x4_OP(RecipApprox, "recip_approx")
+ CASE_F32x4_OP(SqrtApprox, "sqrt_approx")
+ CASE_F32x4_OP(Min, "min")
+ CASE_F32x4_OP(Max, "max")
+ CASE_F32x4_OP(MinNum, "min_num")
+ CASE_F32x4_OP(MaxNum, "max_num")
+ CASE_F32x4_OP(Lt, "lt")
+ CASE_F32x4_OP(Le, "le")
+ CASE_F32x4_OP(Gt, "gt")
+ CASE_F32x4_OP(Ge, "ge")
+ CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert")
+ CASE_F32x4_OP(ExtractLane, "extract_lane")
+ CASE_F32x4_OP(ReplaceLane, "replace_lane")
+ CASE_SIMDI_OP(ExtractLane, "extract_lane")
+ CASE_SIMDI_OP(ReplaceLane, "replace_lane")
+ CASE_SIGN_OP(SIMDI, Min, "min")
+ CASE_SIGN_OP(SIMDI, Max, "max")
+ CASE_SIGN_OP(SIMDI, Lt, "lt")
+ CASE_SIGN_OP(SIMDI, Le, "le")
+ CASE_SIGN_OP(SIMDI, Gt, "gt")
+ CASE_SIGN_OP(SIMDI, Ge, "ge")
+ CASE_SIGN_OP(SIMDI, Shr, "shr")
+ CASE_SIMDI_OP(Shl, "shl")
+ CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
+ CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
+ CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
+ CASE_SIGN_OP(I8x16, SubSaturate, "sub_saturate")
+ CASE_S128_OP(Or, "or")
+ CASE_S128_OP(Xor, "xor")
+ CASE_S128_OP(And, "and")
+ CASE_S128_OP(Not, "not")
+ CASE_S32x4_OP(Select, "select")
+ CASE_S32x4_OP(Swizzle, "swizzle")
+ CASE_S32x4_OP(Shuffle, "shuffle")
+ CASE_S16x8_OP(Select, "select")
+ CASE_S16x8_OP(Swizzle, "swizzle")
+ CASE_S16x8_OP(Shuffle, "shuffle")
+ CASE_S8x16_OP(Select, "select")
+ CASE_S8x16_OP(Swizzle, "swizzle")
+ CASE_S8x16_OP(Shuffle, "shuffle")
+
+ // Atomic operations.
+ CASE_L32_OP(AtomicAdd, "atomic_add")
+ CASE_L32_OP(AtomicAnd, "atomic_and")
+ CASE_L32_OP(AtomicCompareExchange, "atomic_cmpxchng")
+ CASE_L32_OP(AtomicExchange, "atomic_xchng")
+ CASE_L32_OP(AtomicOr, "atomic_or")
+ CASE_L32_OP(AtomicSub, "atomic_sub")
+ CASE_L32_OP(AtomicXor, "atomic_xor")
+
+ default: return "unknown";
+ // clang-format on
}
- return "Unknown";
}
bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
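
The CASE_* macros above stack, so a single table line expands into one case label per type variant. Tracing one line by hand shows the pattern:

    // CASE_INT_OP(Eqz, "eqz") expands to CASE_I32_OP + CASE_I64_OP, i.e.:
    case kExprI32Eqz:
      return "i32.eqz";
    case kExprI64Eqz:
      return "i64.eqz";
    // CASE_SIGN_OP(INT, Div, "div") similarly yields four cases:
    // kExprI32DivS -> "i32.div_s", kExprI32DivU -> "i32.div_u",
    // kExprI64DivS -> "i64.div_s", kExprI64DivU -> "i64.div_u".
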
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 6c231ac69b..a4812f500a 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -21,7 +21,10 @@ enum ValueTypeCode {
kLocalI64 = 0x7e,
kLocalF32 = 0x7d,
kLocalF64 = 0x7c,
- kLocalS128 = 0x7b
+ kLocalS128 = 0x7b,
+ kLocalS1x4 = 0x7a,
+ kLocalS1x8 = 0x79,
+ kLocalS1x16 = 0x78
};
// Type code for multi-value block types.
@@ -36,6 +39,9 @@ const ValueType kWasmI64 = MachineRepresentation::kWord64;
const ValueType kWasmF32 = MachineRepresentation::kFloat32;
const ValueType kWasmF64 = MachineRepresentation::kFloat64;
const ValueType kWasmS128 = MachineRepresentation::kSimd128;
+const ValueType kWasmS1x4 = MachineRepresentation::kSimd1x4;
+const ValueType kWasmS1x8 = MachineRepresentation::kSimd1x8;
+const ValueType kWasmS1x16 = MachineRepresentation::kSimd1x16;
const ValueType kWasmVar = MachineRepresentation::kTagged;
typedef Signature<ValueType> FunctionSig;
@@ -288,153 +294,150 @@ const WasmCodePosition kNoCodePosition = -1;
V(F32x4Max, 0xe50d, s_ss) \
V(F32x4MinNum, 0xe50e, s_ss) \
V(F32x4MaxNum, 0xe50f, s_ss) \
- V(F32x4Eq, 0xe510, s_ss) \
- V(F32x4Ne, 0xe511, s_ss) \
- V(F32x4Lt, 0xe512, s_ss) \
- V(F32x4Le, 0xe513, s_ss) \
- V(F32x4Gt, 0xe514, s_ss) \
- V(F32x4Ge, 0xe515, s_ss) \
- V(F32x4FromInt32x4, 0xe519, s_s) \
- V(F32x4FromUint32x4, 0xe51a, s_s) \
+ V(F32x4Eq, 0xe510, s1x4_ss) \
+ V(F32x4Ne, 0xe511, s1x4_ss) \
+ V(F32x4Lt, 0xe512, s1x4_ss) \
+ V(F32x4Le, 0xe513, s1x4_ss) \
+ V(F32x4Gt, 0xe514, s1x4_ss) \
+ V(F32x4Ge, 0xe515, s1x4_ss) \
+ V(F32x4SConvertI32x4, 0xe519, s_s) \
+ V(F32x4UConvertI32x4, 0xe51a, s_s) \
V(I32x4Splat, 0xe51b, s_i) \
V(I32x4Neg, 0xe51e, s_s) \
V(I32x4Add, 0xe51f, s_ss) \
V(I32x4Sub, 0xe520, s_ss) \
V(I32x4Mul, 0xe521, s_ss) \
- V(I32x4Min_s, 0xe522, s_ss) \
- V(I32x4Max_s, 0xe523, s_ss) \
- V(I32x4Shl, 0xe524, s_si) \
- V(I32x4Shr_s, 0xe525, s_si) \
- V(I32x4Eq, 0xe526, s_ss) \
- V(I32x4Ne, 0xe527, s_ss) \
- V(I32x4Lt_s, 0xe528, s_ss) \
- V(I32x4Le_s, 0xe529, s_ss) \
- V(I32x4Gt_s, 0xe52a, s_ss) \
- V(I32x4Ge_s, 0xe52b, s_ss) \
- V(I32x4Select, 0xe52c, s_sss) \
- V(I32x4Swizzle, 0xe52d, s_s) \
- V(I32x4Shuffle, 0xe52e, s_ss) \
- V(I32x4FromFloat32x4, 0xe52f, s_s) \
- V(I32x4Min_u, 0xe530, s_ss) \
- V(I32x4Max_u, 0xe531, s_ss) \
- V(I32x4Shr_u, 0xe532, s_ss) \
- V(I32x4Lt_u, 0xe533, s_ss) \
- V(I32x4Le_u, 0xe534, s_ss) \
- V(I32x4Gt_u, 0xe535, s_ss) \
- V(I32x4Ge_u, 0xe536, s_ss) \
- V(Ui32x4FromFloat32x4, 0xe537, s_s) \
+ V(I32x4MinS, 0xe522, s_ss) \
+ V(I32x4MaxS, 0xe523, s_ss) \
+ V(I32x4Eq, 0xe526, s1x4_ss) \
+ V(I32x4Ne, 0xe527, s1x4_ss) \
+ V(I32x4LtS, 0xe528, s1x4_ss) \
+ V(I32x4LeS, 0xe529, s1x4_ss) \
+ V(I32x4GtS, 0xe52a, s1x4_ss) \
+ V(I32x4GeS, 0xe52b, s1x4_ss) \
+ V(I32x4SConvertF32x4, 0xe52f, s_s) \
+ V(I32x4MinU, 0xe530, s_ss) \
+ V(I32x4MaxU, 0xe531, s_ss) \
+ V(I32x4LtU, 0xe533, s1x4_ss) \
+ V(I32x4LeU, 0xe534, s1x4_ss) \
+ V(I32x4GtU, 0xe535, s1x4_ss) \
+ V(I32x4GeU, 0xe536, s1x4_ss) \
+ V(I32x4UConvertF32x4, 0xe537, s_s) \
V(I16x8Splat, 0xe538, s_i) \
V(I16x8Neg, 0xe53b, s_s) \
V(I16x8Add, 0xe53c, s_ss) \
- V(I16x8AddSaturate_s, 0xe53d, s_ss) \
+ V(I16x8AddSaturateS, 0xe53d, s_ss) \
V(I16x8Sub, 0xe53e, s_ss) \
- V(I16x8SubSaturate_s, 0xe53f, s_ss) \
+ V(I16x8SubSaturateS, 0xe53f, s_ss) \
V(I16x8Mul, 0xe540, s_ss) \
- V(I16x8Min_s, 0xe541, s_ss) \
- V(I16x8Max_s, 0xe542, s_ss) \
- V(I16x8Shl, 0xe543, s_si) \
- V(I16x8Shr_s, 0xe544, s_si) \
- V(I16x8Eq, 0xe545, s_ss) \
- V(I16x8Ne, 0xe546, s_ss) \
- V(I16x8Lt_s, 0xe547, s_ss) \
- V(I16x8Le_s, 0xe548, s_ss) \
- V(I16x8Gt_s, 0xe549, s_ss) \
- V(I16x8Ge_s, 0xe54a, s_ss) \
- V(I16x8Select, 0xe54b, s_sss) \
- V(I16x8Swizzle, 0xe54c, s_s) \
- V(I16x8Shuffle, 0xe54d, s_ss) \
- V(I16x8AddSaturate_u, 0xe54e, s_ss) \
- V(I16x8SubSaturate_u, 0xe54f, s_ss) \
- V(I16x8Min_u, 0xe550, s_ss) \
- V(I16x8Max_u, 0xe551, s_ss) \
- V(I16x8Shr_u, 0xe552, s_si) \
- V(I16x8Lt_u, 0xe553, s_ss) \
- V(I16x8Le_u, 0xe554, s_ss) \
- V(I16x8Gt_u, 0xe555, s_ss) \
- V(I16x8Ge_u, 0xe556, s_ss) \
+ V(I16x8MinS, 0xe541, s_ss) \
+ V(I16x8MaxS, 0xe542, s_ss) \
+ V(I16x8Eq, 0xe545, s1x8_ss) \
+ V(I16x8Ne, 0xe546, s1x8_ss) \
+ V(I16x8LtS, 0xe547, s1x8_ss) \
+ V(I16x8LeS, 0xe548, s1x8_ss) \
+ V(I16x8GtS, 0xe549, s1x8_ss) \
+ V(I16x8GeS, 0xe54a, s1x8_ss) \
+ V(I16x8AddSaturateU, 0xe54e, s_ss) \
+ V(I16x8SubSaturateU, 0xe54f, s_ss) \
+ V(I16x8MinU, 0xe550, s_ss) \
+ V(I16x8MaxU, 0xe551, s_ss) \
+ V(I16x8LtU, 0xe553, s1x8_ss) \
+ V(I16x8LeU, 0xe554, s1x8_ss) \
+ V(I16x8GtU, 0xe555, s1x8_ss) \
+ V(I16x8GeU, 0xe556, s1x8_ss) \
V(I8x16Splat, 0xe557, s_i) \
V(I8x16Neg, 0xe55a, s_s) \
V(I8x16Add, 0xe55b, s_ss) \
- V(I8x16AddSaturate_s, 0xe55c, s_ss) \
+ V(I8x16AddSaturateS, 0xe55c, s_ss) \
V(I8x16Sub, 0xe55d, s_ss) \
- V(I8x16SubSaturate_s, 0xe55e, s_ss) \
+ V(I8x16SubSaturateS, 0xe55e, s_ss) \
V(I8x16Mul, 0xe55f, s_ss) \
- V(I8x16Min_s, 0xe560, s_ss) \
- V(I8x16Max_s, 0xe561, s_ss) \
- V(I8x16Shl, 0xe562, s_si) \
- V(I8x16Shr_s, 0xe563, s_si) \
- V(I8x16Eq, 0xe564, s_ss) \
- V(I8x16Neq, 0xe565, s_ss) \
- V(I8x16Lt_s, 0xe566, s_ss) \
- V(I8x16Le_s, 0xe567, s_ss) \
- V(I8x16Gt_s, 0xe568, s_ss) \
- V(I8x16Ge_s, 0xe569, s_ss) \
- V(I8x16Select, 0xe56a, s_sss) \
- V(I8x16Swizzle, 0xe56b, s_s) \
- V(I8x16Shuffle, 0xe56c, s_ss) \
- V(I8x16AddSaturate_u, 0xe56d, s_ss) \
- V(I8x16Sub_saturate_u, 0xe56e, s_ss) \
- V(I8x16Min_u, 0xe56f, s_ss) \
- V(I8x16Max_u, 0xe570, s_ss) \
- V(I8x16Shr_u, 0xe571, s_ss) \
- V(I8x16Lt_u, 0xe572, s_ss) \
- V(I8x16Le_u, 0xe573, s_ss) \
- V(I8x16Gt_u, 0xe574, s_ss) \
- V(I8x16Ge_u, 0xe575, s_ss) \
+ V(I8x16MinS, 0xe560, s_ss) \
+ V(I8x16MaxS, 0xe561, s_ss) \
+ V(I8x16Eq, 0xe564, s1x16_ss) \
+ V(I8x16Ne, 0xe565, s1x16_ss) \
+ V(I8x16LtS, 0xe566, s1x16_ss) \
+ V(I8x16LeS, 0xe567, s1x16_ss) \
+ V(I8x16GtS, 0xe568, s1x16_ss) \
+ V(I8x16GeS, 0xe569, s1x16_ss) \
+ V(I8x16AddSaturateU, 0xe56d, s_ss) \
+ V(I8x16SubSaturateU, 0xe56e, s_ss) \
+ V(I8x16MinU, 0xe56f, s_ss) \
+ V(I8x16MaxU, 0xe570, s_ss) \
+ V(I8x16LtU, 0xe572, s1x16_ss) \
+ V(I8x16LeU, 0xe573, s1x16_ss) \
+ V(I8x16GtU, 0xe574, s1x16_ss) \
+ V(I8x16GeU, 0xe575, s1x16_ss) \
V(S128And, 0xe576, s_ss) \
- V(S128Ior, 0xe577, s_ss) \
+ V(S128Or, 0xe577, s_ss) \
V(S128Xor, 0xe578, s_ss) \
V(S128Not, 0xe579, s_s) \
- V(S32x4Select, 0xe580, s_sss) \
- V(S32x4Swizzle, 0xe581, s_s) \
- V(S32x4Shuffle, 0xe582, s_ss)
+ V(S32x4Select, 0xe52c, s_s1x4ss) \
+ V(S32x4Swizzle, 0xe52d, s_s) \
+ V(S32x4Shuffle, 0xe52e, s_ss) \
+ V(S16x8Select, 0xe54b, s_s1x8ss) \
+ V(S16x8Swizzle, 0xe54c, s_s) \
+ V(S16x8Shuffle, 0xe54d, s_ss) \
+ V(S8x16Select, 0xe56a, s_s1x16ss) \
+ V(S8x16Swizzle, 0xe56b, s_s) \
+ V(S8x16Shuffle, 0xe56c, s_ss)
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
V(F32x4ExtractLane, 0xe501, _) \
V(F32x4ReplaceLane, 0xe502, _) \
V(I32x4ExtractLane, 0xe51c, _) \
V(I32x4ReplaceLane, 0xe51d, _) \
+ V(I32x4Shl, 0xe524, _) \
+ V(I32x4ShrS, 0xe525, _) \
+ V(I32x4ShrU, 0xe532, _) \
V(I16x8ExtractLane, 0xe539, _) \
V(I16x8ReplaceLane, 0xe53a, _) \
+ V(I16x8Shl, 0xe543, _) \
+ V(I16x8ShrS, 0xe544, _) \
+ V(I16x8ShrU, 0xe552, _) \
V(I8x16ExtractLane, 0xe558, _) \
- V(I8x16ReplaceLane, 0xe559, _)
+ V(I8x16ReplaceLane, 0xe559, _) \
+ V(I8x16Shl, 0xe562, _) \
+ V(I8x16ShrS, 0xe563, _) \
+ V(I8x16ShrU, 0xe571, _)
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicAdd8S, 0xe601, i_ii) \
V(I32AtomicAdd8U, 0xe602, i_ii) \
V(I32AtomicAdd16S, 0xe603, i_ii) \
V(I32AtomicAdd16U, 0xe604, i_ii) \
- V(I32AtomicAdd32, 0xe605, i_ii) \
+ V(I32AtomicAdd, 0xe605, i_ii) \
V(I32AtomicAnd8S, 0xe606, i_ii) \
V(I32AtomicAnd8U, 0xe607, i_ii) \
V(I32AtomicAnd16S, 0xe608, i_ii) \
V(I32AtomicAnd16U, 0xe609, i_ii) \
- V(I32AtomicAnd32, 0xe60a, i_ii) \
+ V(I32AtomicAnd, 0xe60a, i_ii) \
V(I32AtomicCompareExchange8S, 0xe60b, i_ii) \
V(I32AtomicCompareExchange8U, 0xe60c, i_ii) \
V(I32AtomicCompareExchange16S, 0xe60d, i_ii) \
V(I32AtomicCompareExchange16U, 0xe60e, i_ii) \
- V(I32AtomicCompareExchange32, 0xe60f, i_ii) \
+ V(I32AtomicCompareExchange, 0xe60f, i_ii) \
V(I32AtomicExchange8S, 0xe610, i_ii) \
V(I32AtomicExchange8U, 0xe611, i_ii) \
V(I32AtomicExchange16S, 0xe612, i_ii) \
V(I32AtomicExchange16U, 0xe613, i_ii) \
- V(I32AtomicExchange32, 0xe614, i_ii) \
+ V(I32AtomicExchange, 0xe614, i_ii) \
V(I32AtomicOr8S, 0xe615, i_ii) \
V(I32AtomicOr8U, 0xe616, i_ii) \
V(I32AtomicOr16S, 0xe617, i_ii) \
V(I32AtomicOr16U, 0xe618, i_ii) \
- V(I32AtomicOr32, 0xe619, i_ii) \
+ V(I32AtomicOr, 0xe619, i_ii) \
V(I32AtomicSub8S, 0xe61a, i_ii) \
V(I32AtomicSub8U, 0xe61b, i_ii) \
V(I32AtomicSub16S, 0xe61c, i_ii) \
V(I32AtomicSub16U, 0xe61d, i_ii) \
- V(I32AtomicSub32, 0xe61e, i_ii) \
+ V(I32AtomicSub, 0xe61e, i_ii) \
V(I32AtomicXor8S, 0xe61f, i_ii) \
V(I32AtomicXor8U, 0xe620, i_ii) \
V(I32AtomicXor16S, 0xe621, i_ii) \
V(I32AtomicXor16U, 0xe622, i_ii) \
- V(I32AtomicXor32, 0xe623, i_ii)
+ V(I32AtomicXor, 0xe623, i_ii)
// All opcodes.
#define FOREACH_OPCODE(V) \
@@ -480,13 +483,19 @@ const WasmCodePosition kNoCodePosition = -1;
V(f_if, kWasmF32, kWasmI32, kWasmF32) \
V(l_il, kWasmI64, kWasmI32, kWasmI64)
-#define FOREACH_SIMD_SIGNATURE(V) \
- V(s_s, kWasmS128, kWasmS128) \
- V(s_f, kWasmS128, kWasmF32) \
- V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
- V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128) \
- V(s_i, kWasmS128, kWasmI32) \
- V(s_si, kWasmS128, kWasmS128, kWasmI32)
+#define FOREACH_SIMD_SIGNATURE(V) \
+ V(s_s, kWasmS128, kWasmS128) \
+ V(s_f, kWasmS128, kWasmF32) \
+ V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
+ V(s1x4_ss, kWasmS1x4, kWasmS128, kWasmS128) \
+ V(s1x8_ss, kWasmS1x8, kWasmS128, kWasmS128) \
+ V(s1x16_ss, kWasmS1x16, kWasmS128, kWasmS128) \
+ V(s_i, kWasmS128, kWasmI32) \
+ V(s_si, kWasmS128, kWasmS128, kWasmI32) \
+ V(i_s, kWasmI32, kWasmS128) \
+ V(s_s1x4ss, kWasmS128, kWasmS1x4, kWasmS128, kWasmS128) \
+ V(s_s1x8ss, kWasmS128, kWasmS1x8, kWasmS128, kWasmS128) \
+ V(s_s1x16ss, kWasmS128, kWasmS1x16, kWasmS128, kWasmS128)
#define FOREACH_PREFIX(V) \
V(Simd, 0xe5) \
@@ -524,7 +533,6 @@ enum TrapReason {
class V8_EXPORT_PRIVATE WasmOpcodes {
public:
static const char* OpcodeName(WasmOpcode opcode);
- static const char* ShortOpcodeName(WasmOpcode opcode);
static FunctionSig* Signature(WasmOpcode opcode);
static FunctionSig* AsmjsSignature(WasmOpcode opcode);
static FunctionSig* AtomicSignature(WasmOpcode opcode);
@@ -551,6 +559,12 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return kLocalF64;
case kWasmS128:
return kLocalS128;
+ case kWasmS1x4:
+ return kLocalS1x4;
+ case kWasmS1x8:
+ return kLocalS1x8;
+ case kWasmS1x16:
+ return kLocalS1x16;
case kWasmStmt:
return kLocalVoid;
default:
@@ -571,6 +585,12 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return MachineType::Float64();
case kWasmS128:
return MachineType::Simd128();
+ case kWasmS1x4:
+ return MachineType::Simd1x4();
+ case kWasmS1x8:
+ return MachineType::Simd1x8();
+ case kWasmS1x16:
+ return MachineType::Simd1x16();
case kWasmStmt:
return MachineType::None();
default:
@@ -602,6 +622,12 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return kWasmF64;
} else if (type == MachineType::Simd128()) {
return kWasmS128;
+ } else if (type == MachineType::Simd1x4()) {
+ return kWasmS1x4;
+ } else if (type == MachineType::Simd1x8()) {
+ return kWasmS1x8;
+ } else if (type == MachineType::Simd1x16()) {
+ return kWasmS1x16;
} else {
UNREACHABLE();
return kWasmI32;
@@ -646,6 +672,9 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
case kWasmF64:
return 'd';
case kWasmS128:
+ case kWasmS1x4:
+ case kWasmS1x8:
+ case kWasmS1x16:
return 's';
case kWasmStmt:
return 'v';
@@ -668,6 +697,12 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return "f64";
case kWasmS128:
return "s128";
+ case kWasmS1x4:
+ return "s1x4";
+ case kWasmS1x8:
+ return "s1x8";
+ case kWasmS1x16:
+ return "s1x16";
case kWasmStmt:
return "<stmt>";
case kWasmVar:
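
The signature names in FOREACH_SIMD_SIGNATURE follow a return_params convention: the token before the underscore names the return type, the tokens after it name the parameters, so the new s1x4_ss returns a 4-lane boolean vector and takes two 128-bit vectors. Each V(...) line is expanded elsewhere into a Signature<ValueType>; paraphrased (not the literal expansion), the new entry amounts to:

    // Paraphrased expansion; Signature stores returns first, then params.
    static const ValueType kTypes_s1x4_ss[] = {kWasmS1x4, kWasmS128, kWasmS128};
    static const FunctionSig kSig_s1x4_ss(1, 2, kTypes_s1x4_ss);
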
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 1878095b09..9ad86fbb14 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -5,8 +5,10 @@
#include "src/wasm/wasm-text.h"
#include "src/debug/interface-types.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/vector.h"
+#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -17,106 +19,6 @@ using namespace v8::internal;
using namespace v8::internal::wasm;
namespace {
-const char *GetOpName(WasmOpcode opcode) {
-#define CASE_OP(name, str) \
- case kExpr##name: \
- return str;
-#define CASE_I32_OP(name, str) CASE_OP(I32##name, "i32." str)
-#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
-#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
-#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
-#define CASE_INT_OP(name, str) CASE_I32_OP(name, str) CASE_I64_OP(name, str)
-#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
-#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
-#define CASE_SIGN_OP(TYPE, name, str) \
- CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
-#define CASE_ALL_SIGN_OP(name, str) \
- CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
-#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
- CASE_##RES##_OP(U##name##SRC, str "_u/" src_suffix) \
- CASE_##RES##_OP(S##name##SRC, str "_s/" src_suffix)
-
- switch (opcode) {
- CASE_INT_OP(Eqz, "eqz")
- CASE_ALL_OP(Eq, "eq")
- CASE_ALL_OP(Ne, "ne")
- CASE_ALL_OP(Add, "add")
- CASE_ALL_OP(Sub, "sub")
- CASE_ALL_OP(Mul, "mul")
- CASE_ALL_SIGN_OP(Lt, "lt")
- CASE_ALL_SIGN_OP(Gt, "gt")
- CASE_ALL_SIGN_OP(Le, "le")
- CASE_ALL_SIGN_OP(Ge, "ge")
- CASE_INT_OP(Clz, "clz")
- CASE_INT_OP(Ctz, "ctz")
- CASE_INT_OP(Popcnt, "popcnt")
- CASE_ALL_SIGN_OP(Div, "div")
- CASE_SIGN_OP(INT, Rem, "rem")
- CASE_INT_OP(And, "and")
- CASE_INT_OP(Ior, "or")
- CASE_INT_OP(Xor, "xor")
- CASE_INT_OP(Shl, "shl")
- CASE_SIGN_OP(INT, Shr, "shr")
- CASE_INT_OP(Rol, "rol")
- CASE_INT_OP(Ror, "ror")
- CASE_FLOAT_OP(Abs, "abs")
- CASE_FLOAT_OP(Neg, "neg")
- CASE_FLOAT_OP(Ceil, "ceil")
- CASE_FLOAT_OP(Floor, "floor")
- CASE_FLOAT_OP(Trunc, "trunc")
- CASE_FLOAT_OP(NearestInt, "nearest")
- CASE_FLOAT_OP(Sqrt, "sqrt")
- CASE_FLOAT_OP(Min, "min")
- CASE_FLOAT_OP(Max, "max")
- CASE_FLOAT_OP(CopySign, "copysign")
- CASE_I32_OP(ConvertI64, "wrap/i64")
- CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
- CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
- CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
- CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
- CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
- CASE_F32_OP(ConvertF64, "demote/f64")
- CASE_CONVERT_OP(Convert, F64, I32, "i32", "convert")
- CASE_CONVERT_OP(Convert, F64, I64, "i64", "convert")
- CASE_F64_OP(ConvertF32, "promote/f32")
- CASE_I32_OP(ReinterpretF32, "reinterpret/f32")
- CASE_I64_OP(ReinterpretF64, "reinterpret/f64")
- CASE_F32_OP(ReinterpretI32, "reinterpret/i32")
- CASE_F64_OP(ReinterpretI64, "reinterpret/i64")
- CASE_OP(Unreachable, "unreachable")
- CASE_OP(Nop, "nop")
- CASE_OP(Return, "return")
- CASE_OP(MemorySize, "current_memory")
- CASE_OP(GrowMemory, "grow_memory")
- CASE_OP(Loop, "loop")
- CASE_OP(If, "if")
- CASE_OP(Block, "block")
- CASE_OP(Try, "try")
- CASE_OP(Throw, "throw")
- CASE_OP(Catch, "catch")
- CASE_OP(Drop, "drop")
- CASE_OP(Select, "select")
- CASE_ALL_OP(LoadMem, "load")
- CASE_SIGN_OP(INT, LoadMem8, "load8")
- CASE_SIGN_OP(INT, LoadMem16, "load16")
- CASE_SIGN_OP(I64, LoadMem32, "load32")
- CASE_ALL_OP(StoreMem, "store")
- CASE_INT_OP(StoreMem8, "store8")
- CASE_INT_OP(StoreMem16, "store16")
- CASE_I64_OP(StoreMem32, "store32")
- CASE_OP(SetLocal, "set_local")
- CASE_OP(GetLocal, "get_local")
- CASE_OP(TeeLocal, "tee_local")
- CASE_OP(GetGlobal, "get_global")
- CASE_OP(SetGlobal, "set_global")
- CASE_OP(Br, "br")
- CASE_OP(BrIf, "br_if")
- default:
- UNREACHABLE();
- return "";
- }
-}
-
bool IsValidFunctionName(const Vector<const char> &name) {
if (name.is_empty()) return false;
const char *special_chars = "_.+-*/\\^~=<>!?@#$%&|:'`";
@@ -169,8 +71,7 @@ void wasm::PrintWasmText(const WasmModule *module,
// Print the local declarations.
BodyLocalDecls decls(&zone);
- Vector<const byte> func_bytes = wire_bytes.module_bytes.SubVector(
- fun->code_start_offset, fun->code_end_offset);
+ Vector<const byte> func_bytes = wire_bytes.GetFunctionBytes(fun);
BytecodeIterator i(func_bytes.begin(), func_bytes.end(), &decls);
DCHECK_LT(func_bytes.begin(), i.pc());
if (!decls.type_list.empty()) {
@@ -190,8 +91,7 @@ void wasm::PrintWasmText(const WasmModule *module,
const int kMaxIndentation = 64;
int indentation = std::min(kMaxIndentation, 2 * control_depth);
if (offset_table) {
- offset_table->push_back(debug::WasmDisassemblyOffsetTableEntry(
- i.pc_offset(), line_nr, indentation));
+ offset_table->emplace_back(i.pc_offset(), line_nr, indentation);
}
// 64 whitespaces
@@ -205,7 +105,7 @@ void wasm::PrintWasmText(const WasmModule *module,
case kExprBlock:
case kExprTry: {
BlockTypeOperand operand(&i, i.pc());
- os << GetOpName(opcode);
+ os << WasmOpcodes::OpcodeName(opcode);
for (unsigned i = 0; i < operand.arity; i++) {
os << " " << WasmOpcodes::TypeName(operand.read_entry(i));
}
@@ -215,7 +115,7 @@ void wasm::PrintWasmText(const WasmModule *module,
case kExprBr:
case kExprBrIf: {
BreakDepthOperand operand(&i, i.pc());
- os << GetOpName(opcode) << ' ' << operand.depth;
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.depth;
break;
}
case kExprElse:
@@ -248,13 +148,13 @@ void wasm::PrintWasmText(const WasmModule *module,
case kExprTeeLocal:
case kExprCatch: {
LocalIndexOperand operand(&i, i.pc());
- os << GetOpName(opcode) << ' ' << operand.index;
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.index;
break;
}
case kExprGetGlobal:
case kExprSetGlobal: {
GlobalIndexOperand operand(&i, i.pc());
- os << GetOpName(opcode) << ' ' << operand.index;
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.index;
break;
}
#define CASE_CONST(type, str, cast_type) \
@@ -272,7 +172,7 @@ void wasm::PrintWasmText(const WasmModule *module,
FOREACH_LOAD_MEM_OPCODE(CASE_OPCODE)
FOREACH_STORE_MEM_OPCODE(CASE_OPCODE) {
MemoryAccessOperand operand(&i, i.pc(), kMaxUInt32);
- os << GetOpName(opcode) << " offset=" << operand.offset
+ os << WasmOpcodes::OpcodeName(opcode) << " offset=" << operand.offset
<< " align=" << (1ULL << operand.alignment);
break;
}
@@ -286,7 +186,7 @@ void wasm::PrintWasmText(const WasmModule *module,
case kExprDrop:
case kExprSelect:
case kExprThrow:
- os << GetOpName(opcode);
+ os << WasmOpcodes::OpcodeName(opcode);
break;
// This group is just printed by their internal opcode name, as they
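
Two things happen in this file: the private GetOpName duplicate is deleted in favor of the single table now in WasmOpcodes::OpcodeName, and the offset-table append switches to emplace_back, which constructs the entry in place instead of building a named temporary first. In general form (`table` stands in for `*offset_table`):

    // push_back: construct a temporary, then copy/move it into the vector.
    table.push_back(debug::WasmDisassemblyOffsetTableEntry(pc, line, indent));
    // emplace_back: forward the arguments to the element constructor in place.
    table.emplace_back(pc, line, indent);
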
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index fb33872627..2483bbd797 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -9,6 +9,7 @@
#include "src/base/cpu.h"
#include "src/debug/debug.h"
+#include "src/objects-inl.h"
#include "src/v8memory.h"
namespace v8 {
@@ -23,8 +24,6 @@ bool CpuFeatures::SupportsSimd128() { return true; }
static const byte kCallOpcode = 0xE8;
-// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi).
-static const int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17;
void Assembler::emitl(uint32_t x) {
@@ -83,6 +82,12 @@ void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
entry - isolate()->heap()->memory_allocator()->code_range()->start()));
}
+void Assembler::emit(Immediate x) {
+ if (!RelocInfo::IsNone(x.rmode_)) {
+ RecordRelocInfo(x.rmode_);
+ }
+ emitl(x.value_);
+}
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
@@ -281,6 +286,17 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
+Address Assembler::target_address_at(Address pc, Code* code) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
@@ -292,6 +308,10 @@ Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
+void Assembler::deserialization_set_special_target_at(
+ Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
+}
Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
return code_targets_[Memory::int32_at(pc)];
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index f8162b096a..9c3a9cd6f4 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -15,7 +15,9 @@
#include <sys/sysctl.h>
#endif
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
+#include "src/base/cpu.h"
#include "src/macro-assembler.h"
#include "src/v8.h"
@@ -2173,7 +2175,12 @@ void Assembler::emit_test(const Operand& op, Register reg, int size) {
bool byte_operand = size == sizeof(int8_t);
if (byte_operand) {
size = sizeof(int32_t);
- if (!reg.is_byte_register()) emit_rex_32(reg, op);
+ if (!reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg, op);
+ } else {
+ emit_optional_rex_32(reg, op);
+ }
} else {
emit_rex(reg, op, size);
}
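
The else-branch addition matters because of how x64 encodes byte registers: without a REX prefix, register codes 4-7 in a byte instruction mean ah/ch/dh/bh, while any REX prefix (even a bare 0x40) turns them into spl/bpl/sil/dil; independently, a memory operand whose base or index is r8-r15 still needs its REX.B/REX.X bits. Emitting the optional REX for al/bl/cl/dl covers that second case. A sketch of the rule (hypothetical helper, not V8 code):

    // Hypothetical predicate: does a byte-sized test need a REX prefix?
    bool NeedsRexForByteTest(int reg_code, bool operand_uses_r8_to_r15) {
      // Codes >= 4 without REX would address ah..bh instead of the low byte.
      bool rex_required_for_reg = reg_code >= 4;
      return rex_required_for_reg || operand_uses_r8_to_r15;
    }
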
@@ -3031,7 +3038,7 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
- emit_optional_rex_32(src, dst);
+ emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xC6);
emit_sse_operand(dst, src);
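
The operand order in the REX helper was simply backwards. shufps encodes as 0F C6 /r ib with dst in ModRM.reg and src in ModRM.rm, and REX.R extends the reg field while REX.B extends the rm field, so the helper must see (dst, src). With the arguments swapped, the bits were reversed whenever either register was xmm8-xmm15:

    // shufps xmm9, xmm1, 0  -- correct encoding:
    //   REX.R (0x44)  0F C6  ModRM(mod=11, reg=xmm9 & 7, rm=xmm1)  0x00
    // The old (src, dst) order emitted REX.B instead of REX.R here, so the
    // instruction referenced the wrong register pair above xmm7.
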
@@ -4664,6 +4671,14 @@ void Assembler::emit_sse_operand(XMMRegister dst) {
emit(0xD8 | dst.low_bits());
}
+void Assembler::RecordProtectedInstructionLanding(int pc_offset) {
+ EnsureSpace ensure_space(this);
+ RelocInfo rinfo(isolate(), pc(),
+ RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING, pc_offset,
+ nullptr);
+ reloc_info_writer.Write(&rinfo);
+}
+
void Assembler::db(uint8_t data) {
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 08c621c938..07d8c25787 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -79,6 +79,8 @@ namespace internal {
V(r14) \
V(r15)
+// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi).
+static const int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17;
// CPU Registers.
//
@@ -203,6 +205,7 @@ const Register arg_reg_4 = {Register::kCode_rcx};
V(xmm14)
static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
struct XMMRegister {
enum Code {
@@ -503,17 +506,10 @@ class Assembler : public AssemblerBase {
static inline void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static inline Address target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- return target_address_at(pc, constant_pool);
- }
+ static inline Address target_address_at(Address pc, Code* code);
static inline void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(isolate, pc, constant_pool, target,
- icache_flush_mode);
- }
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -523,9 +519,7 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code,
- Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
- }
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
@@ -2000,6 +1994,8 @@ class Assembler : public AssemblerBase {
UNREACHABLE();
}
+ void RecordProtectedInstructionLanding(int pc_offset);
+
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
@@ -2055,12 +2051,7 @@ class Assembler : public AssemblerBase {
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
- void emit(Immediate x) {
- if (!RelocInfo::IsNone(x.rmode_)) {
- RecordRelocInfo(x.rmode_);
- }
- emitl(x.value_);
- }
+ inline void emit(Immediate x);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of both register codes.
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 08ef5a0ce2..7b57c2cd19 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -4,18 +4,24 @@
#if V8_TARGET_ARCH_X64
-#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/counters.h"
+#include "src/double.h"
+#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/regexp-match-info.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
-#include "src/x64/code-stubs-x64.h"
+
+#include "src/x64/code-stubs-x64.h" // Cannot be the first include.
namespace v8 {
namespace internal {
@@ -344,55 +350,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver = LoadDescriptor::ReceiverRegister();
- // Ensure that the vector and slot registers won't be clobbered before
- // calling the miss handler.
- DCHECK(!AreAliased(r8, r9, LoadWithVectorDescriptor::VectorRegister(),
- LoadDescriptor::SlotRegister()));
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
- r9, &miss);
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
- // Return address is on the stack.
- Label miss;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register index = LoadDescriptor::NameRegister();
- Register scratch = rdi;
- Register result = rax;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
- result.is(LoadDescriptor::SlotRegister()));
-
- // StringCharAtGenerator doesn't use the result register until it's passed
- // the different miss possibilities. If it did, we would have a conflict
- // when FLAG_vector_ics is true.
- StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- RECEIVER_IS_STRING);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -484,7 +441,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (8) Is the external string one byte? If yes, go to (5).
// (9) Two byte sequential. Load regexp code for two byte. Go to (E).
// (10) Short external string or not a string? If yes, bail out to runtime.
- // (11) Sliced string. Replace subject with parent. Go to (1).
+ // (11) Sliced or thin string. Replace subject with parent. Go to (1).
Label seq_one_byte_string /* 5 */, seq_two_byte_string /* 9 */,
external_string /* 7 */, check_underlying /* 1 */,
@@ -514,6 +471,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// have already been covered.
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmpp(rbx, Immediate(kExternalStringTag));
@@ -802,11 +760,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
__ j(not_zero, &runtime);
- // (11) Sliced string. Replace subject with parent. Go to (1).
+ // (11) Sliced or thin string. Replace subject with parent. Go to (1).
+ Label thin_string;
+ __ cmpl(rbx, Immediate(kThinStringTag));
+ __ j(equal, &thin_string, Label::kNear);
// Load offset into r14 and replace subject string with parent.
__ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
__ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
__ jmp(&check_underlying);
+
+ __ bind(&thin_string);
+ __ movp(rdi, FieldOperand(rdi, ThinString::kActualOffset));
+ __ jmp(&check_underlying);
#endif // V8_INTERPRETED_REGEXP
}
@@ -907,9 +872,6 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(rcx, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
__ j(equal, &runtime_call, Label::kFar);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ cmpb(rcx, Immediate(static_cast<uint8_t>(SIMD128_VALUE_TYPE)));
- __ j(equal, &runtime_call, Label::kFar);
}
__ Set(rax, EQUAL);
__ ret(0);
@@ -1287,201 +1249,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
- Register slot) {
- __ SmiAddConstant(FieldOperand(feedback_vector, slot, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Smi::FromInt(1));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
- // rdi - function
- // rdx - slot id
- // rbx - vector
- // rcx - allocation site (loaded from vector[slot]).
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
- __ cmpp(rdi, r8);
- __ j(not_equal, miss);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, rbx, rdx);
-
- __ movp(rbx, rcx);
- __ movp(rdx, rdi);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax - number of arguments
- // -- rdi - function
- // -- rdx - slot id
- // -- rbx - vector
- // -----------------------------------
- Isolate* isolate = masm->isolate();
- Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
- // The checks. First, does rdi match the recorded monomorphic target?
- __ SmiToInteger32(rdx, rdx);
- __ movp(rcx,
- FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
-
- // We don't know that we have a weak cell. We might have a private symbol
- // or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
- // computed, meaning that it can't appear to be a pointer. If the low bit is
- // 0, then hash is computed, but the 0 bit prevents the field from appearing
- // to be a pointer.
- STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
- WeakCell::kValueOffset &&
- WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
- __ cmpp(rdi, FieldOperand(rcx, WeakCell::kValueOffset));
- __ j(not_equal, &extra_checks_or_miss);
-
- // The compare above could have been a SMI/SMI comparison. Guard against this
- // convincing us that we have a monomorphic JSFunction.
- __ JumpIfSmi(rdi, &extra_checks_or_miss);
-
- __ bind(&call_function);
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, rbx, rdx);
-
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
- tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&extra_checks_or_miss);
- Label uninitialized, miss, not_allocation_site;
-
- __ Cmp(rcx, FeedbackVector::MegamorphicSentinel(isolate));
- __ j(equal, &call);
-
- // Check if we have an allocation site.
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- Heap::kAllocationSiteMapRootIndex);
- __ j(not_equal, &not_allocation_site);
-
- // We have an allocation site.
- HandleArrayCase(masm, &miss);
-
- __ bind(&not_allocation_site);
-
- // The following cases attempt to handle MISS cases without going to the
- // runtime.
- if (FLAG_trace_ic) {
- __ jmp(&miss);
- }
-
- __ Cmp(rcx, FeedbackVector::UninitializedSentinel(isolate));
- __ j(equal, &uninitialized);
-
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(rcx);
- __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &miss);
- __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
- FeedbackVector::MegamorphicSentinel(isolate));
-
- __ bind(&call);
-
- // Increment the call count for megamorphic function calls.
- IncrementCallCount(masm, rbx, rdx);
-
- __ bind(&call_count_incremented);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&uninitialized);
-
- // We are going monomorphic, provided we actually have a JSFunction.
- __ JumpIfSmi(rdi, &miss);
-
- // Goto miss case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &miss);
-
- // Make sure the function is not the Array() function, which requires special
- // behavior on MISS.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, rcx);
- __ cmpp(rdi, rcx);
- __ j(equal, &miss);
-
- // Make sure the function belongs to the same native context.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kContextOffset));
- __ movp(rcx, ContextOperand(rcx, Context::NATIVE_CONTEXT_INDEX));
- __ cmpp(rcx, NativeContextOperand());
- __ j(not_equal, &miss);
-
- // Store the function. Use a stub since we need a frame for allocation.
- // rbx - vector
- // rdx - slot (needs to be in smi form)
- // rdi - function
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateWeakCellStub create_stub(isolate);
-
- __ Integer32ToSmi(rax, rax);
- __ Integer32ToSmi(rdx, rdx);
- __ Push(rax);
- __ Push(rbx);
- __ Push(rdx);
- __ Push(rdi);
- __ Push(rsi);
- __ CallStub(&create_stub);
- __ Pop(rsi);
- __ Pop(rdi);
- __ Pop(rdx);
- __ Pop(rbx);
- __ Pop(rax);
- __ SmiToInteger32(rdx, rdx);
- __ SmiToInteger32(rax, rax);
- }
-
- __ jmp(&call_function);
-
- // We are here because tracing is on or we encountered a MISS case we can't
- // handle here.
- __ bind(&miss);
- GenerateMiss(masm);
-
- __ jmp(&call_count_incremented);
-
- // Unreachable
- __ int3();
-}
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the number of arguments.
- __ Integer32ToSmi(rax, rax);
- __ Push(rax);
-
- // Push the receiver and the function and feedback info.
- __ Integer32ToSmi(rdx, rdx);
- __ Push(rdi);
- __ Push(rbx);
- __ Push(rdx);
-
- // Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss);
-
- // Move result to edi and exit the internal frame.
- __ movp(rdi, rax);
-
- // Restore number of arguments.
- __ Pop(rax);
- __ SmiToInteger32(rax, rax);
-}
-
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@@ -1692,8 +1459,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ movp(rbp, rsp);
// Push the stack frame type.
- int marker = type();
- __ Push(Smi::FromInt(marker)); // context slot
+ __ Push(Immediate(StackFrame::TypeToMarker(type()))); // context slot
ExternalReference context_address(Isolate::kContextAddress, isolate());
__ Load(kScratchRegister, context_address);
__ Push(kScratchRegister); // context
@@ -1740,13 +1506,13 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Load(rax, js_entry_sp);
__ testp(rax, rax);
__ j(not_zero, &not_outermost_js);
- __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ movp(rax, rbp);
__ Store(js_entry_sp, rax);
Label cont;
__ jmp(&cont);
__ bind(&not_outermost_js);
- __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+ __ Push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
// Jump to a faked try block that does the invoke, with a faked catch
@@ -1791,7 +1557,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
__ Pop(rbx);
- __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ cmpp(rbx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
__ Move(kScratchRegister, js_entry_sp);
__ movp(Operand(kScratchRegister, 0), Immediate(0));
@@ -1928,44 +1694,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(code_, &slow_case_);
- __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
- __ j(above, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
- __ movp(result_, FieldOperand(result_, index.reg, index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ Push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode);
- if (!result_.is(rax)) {
- __ movp(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -2764,6 +2492,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
+void RecordWriteStub::Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@@ -2853,12 +2584,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadFeedbackVector(rbx);
- CallICStub stub(isolate(), state());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
@@ -3214,531 +2939,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdi : function
- // -- rsi : context
- // -- rbp : frame pointer
- // -- rsp[0] : return address
- // -----------------------------------
- __ AssertFunction(rdi);
-
- // Make rdx point to the JavaScript frame.
- __ movp(rdx, rbp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
- __ j(equal, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have rest parameters (only possible if we have an
- // arguments adaptor frame below the function frame).
- Label no_rest_parameters;
- __ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &no_rest_parameters, Label::kNear);
-
- // Check if the arguments adaptor frame contains more arguments than
- // specified by the function's internal formal parameter count.
- Label rest_parameters;
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
- __ SmiToInteger32(
- rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ subl(rax, rcx);
- __ j(greater, &rest_parameters);
-
- // Return an empty rest parameter array.
- __ bind(&no_rest_parameters);
- {
- // ----------- S t a t e -------------
- // -- rsi : context
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Allocate an empty rest parameter array.
- Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, rax, rdx, rcx, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the rest parameter array in rax.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, rcx);
- __ movp(FieldOperand(rax, JSArray::kMapOffset), rcx);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSArray::kPropertiesOffset), rcx);
- __ movp(FieldOperand(rax, JSArray::kElementsOffset), rcx);
- __ movp(FieldOperand(rax, JSArray::kLengthOffset), Immediate(0));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(Smi::FromInt(JSArray::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- }
- __ jmp(&done_allocate);
- }
-
- __ bind(&rest_parameters);
- {
- // Compute the pointer to the first rest parameter (skipping the receiver).
- __ leap(rbx, Operand(rbx, rax, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset -
- 1 * kPointerSize));
-
- // ----------- S t a t e -------------
- // -- rdi : function
- // -- rsi : context
- // -- rax : number of rest parameters
- // -- rbx : pointer to first rest parameters
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ leal(rcx, Operand(rax, times_pointer_size,
- JSArray::kSize + FixedArray::kHeaderSize));
- __ Allocate(rcx, rdx, r8, no_reg, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Compute the arguments.length in rdi.
- __ Integer32ToSmi(rdi, rax);
-
- // Setup the elements array in rdx.
- __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movp(FieldOperand(rdx, FixedArray::kMapOffset), rcx);
- __ movp(FieldOperand(rdx, FixedArray::kLengthOffset), rdi);
- {
- Label loop, done_loop;
- __ Set(rcx, 0);
- __ bind(&loop);
- __ cmpl(rcx, rax);
- __ j(equal, &done_loop, Label::kNear);
- __ movp(kScratchRegister, Operand(rbx, 0 * kPointerSize));
- __ movp(
- FieldOperand(rdx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- kScratchRegister);
- __ subp(rbx, Immediate(1 * kPointerSize));
- __ addl(rcx, Immediate(1));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Setup the rest parameter array in rax.
- __ leap(rax,
- Operand(rdx, rax, times_pointer_size, FixedArray::kHeaderSize));
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, rcx);
- __ movp(FieldOperand(rax, JSArray::kMapOffset), rcx);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSArray::kPropertiesOffset), rcx);
- __ movp(FieldOperand(rax, JSArray::kElementsOffset), rdx);
- __ movp(FieldOperand(rax, JSArray::kLengthOffset), rdi);
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ cmpl(rcx, Immediate(kMaxRegularHeapObjectSize));
- __ j(greater, &too_big_for_new_space);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Integer32ToSmi(rax, rax);
- __ Integer32ToSmi(rcx, rcx);
- __ Push(rax);
- __ Push(rbx);
- __ Push(rcx);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ movp(rdx, rax);
- __ Pop(rbx);
- __ Pop(rax);
- __ SmiToInteger32(rax, rax);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewRestParameter.
- __ bind(&too_big_for_new_space);
- __ PopReturnAddressTo(kScratchRegister);
- __ Push(rdi);
- __ PushReturnAddressFrom(kScratchRegister);
- __ TailCallRuntime(Runtime::kNewRestParameter);
- }
-}
-
-
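
Editor's note: the deleted FastNewRestParameterStub derives the rest parameter count by subtracting the function's formal parameter count from the adaptor frame's argument count. A minimal stand-alone sketch of that arithmetic, with illustrative names rather than V8 API:

#include <algorithm>
#include <cstdio>

// Rest parameters are the actual arguments beyond the formal parameter
// count, clamped at zero when too few arguments were passed.
static int RestParameterCount(int actual_arguments, int formal_parameters) {
  return std::max(0, actual_arguments - formal_parameters);
}

int main() {
  // function f(a, ...rest) {} called as f(1, 2, 3): 3 actuals, 1 formal.
  std::printf("%d\n", RestParameterCount(3, 1));  // prints 2
}
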
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdi : function
- // -- rsi : context
- // -- rbp : frame pointer
- // -- rsp[0] : return address
- // -----------------------------------
- __ AssertFunction(rdi);
-
- // Make r9 point to the JavaScript frame.
- __ movp(r9, rbp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ movp(r9, Operand(r9, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ cmpp(rdi, Operand(r9, StandardFrameConstants::kFunctionOffset));
- __ j(equal, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
-  // TODO(bmeurer): Clean up to match the FastNewStrictArgumentsStub.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
- __ leap(rdx, Operand(r9, rcx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- __ Integer32ToSmi(rcx, rcx);
-
- // rcx : number of parameters (tagged)
- // rdx : parameters pointer
- // rdi : function
- // rsp[0] : return address
- // r9 : JavaScript frame pointer.
- // Registers used over the whole function:
- // rbx: the mapped parameter count (untagged)
- // rax: the allocated object (tagged).
- Factory* factory = isolate()->factory();
-
- __ SmiToInteger64(rbx, rcx);
- // rbx = parameter count (untagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movp(rax, Operand(r9, StandardFrameConstants::kCallerFPOffset));
- __ movp(r8, Operand(rax, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ movp(r11, rbx);
- __ jmp(&try_allocate, Label::kNear);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ SmiToInteger64(
- r11, Operand(rax, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ leap(rdx, Operand(rax, r11, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
-
- // rbx = parameter count (untagged)
- // r11 = argument count (untagged)
- // Compute the mapped parameter count = min(rbx, r11) in rbx.
- __ cmpp(rbx, r11);
- __ j(less_equal, &try_allocate, Label::kNear);
- __ movp(rbx, r11);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map: has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- Label no_parameter_map;
- __ xorp(r8, r8);
- __ testp(rbx, rbx);
- __ j(zero, &no_parameter_map, Label::kNear);
- __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
- __ bind(&no_parameter_map);
-
- // 2. Backing store.
- __ leap(r8, Operand(r8, r11, times_pointer_size, FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ addp(r8, Immediate(JSSloppyArgumentsObject::kSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(r8, rax, r9, no_reg, &runtime, NO_ALLOCATION_FLAGS);
-
- // rax = address of new object(s) (tagged)
- // r11 = argument count (untagged)
- // Get the arguments map from the current native context into r9.
- Label has_mapped_parameters, instantiate;
- __ movp(r9, NativeContextOperand());
- __ testp(rbx, rbx);
- __ j(not_zero, &has_mapped_parameters, Label::kNear);
-
- const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
- __ movp(r9, Operand(r9, Context::SlotOffset(kIndex)));
- __ jmp(&instantiate, Label::kNear);
-
- const int kAliasedIndex = Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX;
- __ bind(&has_mapped_parameters);
- __ movp(r9, Operand(r9, Context::SlotOffset(kAliasedIndex)));
- __ bind(&instantiate);
-
- // rax = address of new object (tagged)
- // rbx = mapped parameter count (untagged)
- // r11 = argument count (untagged)
- // r9 = address of arguments map (tagged)
- __ movp(FieldOperand(rax, JSObject::kMapOffset), r9);
- __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
-
- // Set up the callee in-object property.
- __ AssertNotSmi(rdi);
- __ movp(FieldOperand(rax, JSSloppyArgumentsObject::kCalleeOffset), rdi);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- // Note: r11 is tagged from here on.
- __ Integer32ToSmi(r11, r11);
- __ movp(FieldOperand(rax, JSSloppyArgumentsObject::kLengthOffset), r11);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, rdi will point there, otherwise to the
- // backing store.
- __ leap(rdi, Operand(rax, JSSloppyArgumentsObject::kSize));
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
-
- // rax = address of new object (tagged)
- // rbx = mapped parameter count (untagged)
- // r11 = argument count (tagged)
- // rdi = address of parameter map or backing store (tagged)
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ testp(rbx, rbx);
- __ j(zero, &skip_parameter_map);
-
- __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
-  // rbx contains the untagged mapped parameter count. Add 2 and tag to write.
- __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ Integer64PlusConstantToSmi(r9, rbx, 2);
- __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
- __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
- __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
-
- // Load tagged parameter count into r9.
- __ Integer32ToSmi(r9, rbx);
- __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addp(r8, rcx);
- __ subp(r8, r9);
- __ movp(rcx, rdi);
- __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- __ SmiToInteger64(r9, r9);
- // r9 = loop variable (untagged)
- // r8 = mapping index (tagged)
- // rcx = address of parameter map (tagged)
- // rdi = address of backing store (tagged)
- __ jmp(&parameters_test, Label::kNear);
-
- __ bind(&parameters_loop);
- __ subp(r9, Immediate(1));
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movp(FieldOperand(rcx, r9, times_pointer_size, kParameterMapHeaderSize),
- r8);
- __ movp(FieldOperand(rdi, r9, times_pointer_size, FixedArray::kHeaderSize),
- kScratchRegister);
- __ SmiAddConstant(r8, r8, Smi::FromInt(1));
- __ bind(&parameters_test);
- __ testp(r9, r9);
- __ j(not_zero, &parameters_loop, Label::kNear);
-
- __ bind(&skip_parameter_map);
-
- // r11 = argument count (tagged)
- // rdi = address of backing store (tagged)
- // Copy arguments header and remaining slots (if there are any).
- __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
- factory->fixed_array_map());
- __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r11);
-
- Label arguments_loop, arguments_test;
- __ movp(r8, rbx);
- // Untag r11 for the loop below.
- __ SmiToInteger64(r11, r11);
- __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
- __ subp(rdx, kScratchRegister);
- __ jmp(&arguments_test, Label::kNear);
-
- __ bind(&arguments_loop);
- __ subp(rdx, Immediate(kPointerSize));
- __ movp(r9, Operand(rdx, 0));
- __ movp(FieldOperand(rdi, r8,
- times_pointer_size,
- FixedArray::kHeaderSize),
- r9);
- __ addp(r8, Immediate(1));
-
- __ bind(&arguments_test);
- __ cmpp(r8, r11);
- __ j(less, &arguments_loop, Label::kNear);
-
- // Return.
- __ ret(0);
-
- // Do the runtime call to allocate the arguments object.
- // r11 = argument count (untagged)
- __ bind(&runtime);
- __ Integer32ToSmi(r11, r11);
- __ PopReturnAddressTo(rax);
- __ Push(rdi); // Push function.
- __ Push(rdx); // Push parameters pointer.
- __ Push(r11); // Push parameter count.
- __ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
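
Editor's note: the deleted sloppy stub sizes a single allocation for all three objects. The same computation in plain C++, as a hedged sketch: the pointer size and object sizes below are assumptions standing in for the stub's constants, not V8's layout code.

#include <algorithm>
#include <cstddef>

static std::size_t SloppyArgumentsAllocationSize(std::size_t formal_count,
                                                 std::size_t argument_count) {
  const std::size_t kPointerSize = 8;                          // assumption: x64
  const std::size_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
  // Parameter map carries two extra words: context and backing store.
  const std::size_t kParameterMapHeaderSize =
      kFixedArrayHeaderSize + 2 * kPointerSize;
  const std::size_t kJSSloppyArgumentsObjectSize = 5 * kPointerSize;  // assumed
  const std::size_t mapped = std::min(formal_count, argument_count);

  std::size_t size = 0;
  if (mapped > 0) {  // 1. parameter map (only when something is mapped)
    size += kParameterMapHeaderSize + mapped * kPointerSize;
  }
  size += kFixedArrayHeaderSize + argument_count * kPointerSize;  // 2. store
  return size + kJSSloppyArgumentsObjectSize;                     // 3. object
}
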
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdi : function
- // -- rsi : context
- // -- rbp : frame pointer
- // -- rsp[0] : return address
- // -----------------------------------
- __ AssertFunction(rdi);
-
- // Make rdx point to the JavaScript frame.
- __ movp(rdx, rbp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
- __ j(equal, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &arguments_adaptor, Label::kNear);
- {
- __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
- __ leap(rbx, Operand(rdx, rax, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset -
- 1 * kPointerSize));
- }
- __ jmp(&arguments_done, Label::kNear);
- __ bind(&arguments_adaptor);
- {
- __ SmiToInteger32(
- rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ leap(rbx, Operand(rbx, rax, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset -
- 1 * kPointerSize));
- }
- __ bind(&arguments_done);
-
- // ----------- S t a t e -------------
- // -- rax : number of arguments
- // -- rbx : pointer to the first argument
- // -- rdi : function
- // -- rsi : context
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Allocate space for the strict arguments object plus the backing store.
- Label allocate, done_allocate;
- __ leal(rcx, Operand(rax, times_pointer_size, JSStrictArgumentsObject::kSize +
- FixedArray::kHeaderSize));
- __ Allocate(rcx, rdx, r8, no_reg, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Compute the arguments.length in rdi.
- __ Integer32ToSmi(rdi, rax);
-
-  // Set up the elements array in rdx.
- __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movp(FieldOperand(rdx, FixedArray::kMapOffset), rcx);
- __ movp(FieldOperand(rdx, FixedArray::kLengthOffset), rdi);
- {
- Label loop, done_loop;
- __ Set(rcx, 0);
- __ bind(&loop);
- __ cmpl(rcx, rax);
- __ j(equal, &done_loop, Label::kNear);
- __ movp(kScratchRegister, Operand(rbx, 0 * kPointerSize));
- __ movp(
- FieldOperand(rdx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- kScratchRegister);
- __ subp(rbx, Immediate(1 * kPointerSize));
- __ addl(rcx, Immediate(1));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
-  // Set up the strict arguments object in rax.
- __ leap(rax,
- Operand(rdx, rax, times_pointer_size, FixedArray::kHeaderSize));
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, rcx);
- __ movp(FieldOperand(rax, JSStrictArgumentsObject::kMapOffset), rcx);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSStrictArgumentsObject::kPropertiesOffset), rcx);
- __ movp(FieldOperand(rax, JSStrictArgumentsObject::kElementsOffset), rdx);
- __ movp(FieldOperand(rax, JSStrictArgumentsObject::kLengthOffset), rdi);
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ cmpl(rcx, Immediate(kMaxRegularHeapObjectSize));
- __ j(greater, &too_big_for_new_space);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Integer32ToSmi(rax, rax);
- __ Integer32ToSmi(rcx, rcx);
- __ Push(rax);
- __ Push(rbx);
- __ Push(rcx);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ movp(rdx, rax);
- __ Pop(rbx);
- __ Pop(rax);
- __ SmiToInteger32(rax, rax);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ bind(&too_big_for_new_space);
- __ PopReturnAddressTo(kScratchRegister);
- __ Push(rdi);
- __ PushReturnAddressFrom(kScratchRegister);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
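
Editor's note: the deleted stubs share a copy loop whose source pointer starts at the highest-addressed argument slot and steps down the stack (the subp(rbx, kPointerSize) step) while the destination FixedArray fills front to back. A stand-alone sketch of that traversal, assuming 64-bit stack slots:

#include <cstddef>
#include <cstdint>

// first_arg points at the first copied argument; the remaining arguments
// sit at successively lower addresses on the stack.
static void CopyArguments(const std::intptr_t* first_arg,
                          std::intptr_t* elements, std::size_t count) {
  for (std::size_t i = 0; i < count; ++i) {
    elements[i] = *(first_arg - i);
  }
}
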
static int Offset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
// Check that the offset fits into an int.
@@ -3746,7 +2946,6 @@ static int Offset(ExternalReference ref0, ExternalReference ref1) {
return static_cast<int>(offset);
}
-
// Prepares the stack to put arguments (aligns and so on). The WIN64 calling
// convention requires putting the pointer to the return value slot into
// rcx (rcx must be preserved until CallApiFunctionAndReturn). Saves
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index bf503dfc6b..4240cb46ca 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -308,9 +308,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- void Activate(Code* code) override {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
+ void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 2432d7ed4f..f8ed7cb687 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -8,6 +8,7 @@
#include "src/codegen.h"
#include "src/macro-assembler.h"
+#include "src/x64/assembler-x64-inl.h"
namespace v8 {
namespace internal {
@@ -67,6 +68,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register index,
Register result,
Label* call_runtime) {
+ Label indirect_string_loaded;
+ __ bind(&indirect_string_loaded);
+
// Fetch the instance type of the receiver into result register.
__ movp(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
@@ -77,16 +81,23 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ j(zero, &check_sequential, Label::kNear);
// Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ testb(result, Immediate(kSlicedNotConsMask));
- __ j(zero, &cons_string, Label::kNear);
+ Label cons_string, thin_string;
+ __ andl(result, Immediate(kStringRepresentationMask));
+ __ cmpl(result, Immediate(kConsStringTag));
+ __ j(equal, &cons_string, Label::kNear);
+ __ cmpl(result, Immediate(kThinStringTag));
+ __ j(equal, &thin_string, Label::kNear);
// Handle slices.
- Label indirect_string_loaded;
__ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
__ addp(index, result);
__ movp(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded, Label::kNear);
+ __ jmp(&indirect_string_loaded);
+
+ // Handle thin strings.
+ __ bind(&thin_string);
+ __ movp(string, FieldOperand(string, ThinString::kActualOffset));
+ __ jmp(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
@@ -98,10 +109,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Heap::kempty_stringRootIndex);
__ j(not_equal, call_runtime);
__ movp(string, FieldOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ __ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
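
Editor's note: the rewritten generator hoists indirect_string_loaded to the top so that slices, cons strings, and the new thin strings are all unwrapped by re-running the full dispatch. A hedged C++ sketch of that loop over an illustrative string representation (not V8's real instance-type dispatch):

#include <cstddef>

struct Str {  // illustrative only; V8's real layout differs
  enum Kind { kSeq, kCons, kSliced, kThin };
  Kind kind;
  Str* target;         // sliced: parent, cons: first part, thin: actual
  std::size_t offset;  // sliced strings only
};

static Str* UnwrapIndirect(Str* s, std::size_t* index) {
  for (;;) {  // mirrors jumping back to indirect_string_loaded
    switch (s->kind) {
      case Str::kSliced:
        *index += s->offset;  // a slice is a window into its parent
        s = s->target;
        break;
      case Str::kCons:  // only flat cons (empty second part) reaches here
      case Str::kThin:  // a thin string simply forwards to the actual one
        s = s->target;
        break;
      default:
        return s;  // sequential or external: ready for the load
    }
  }
}
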
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 9fbf69e55e..1664a1570f 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -7,6 +7,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -100,7 +101,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
+ Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
diff --git a/deps/v8/src/x64/eh-frame-x64.cc b/deps/v8/src/x64/eh-frame-x64.cc
index afbcf2167e..8604332704 100644
--- a/deps/v8/src/x64/eh-frame-x64.cc
+++ b/deps/v8/src/x64/eh-frame-x64.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/eh-frame.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index b775011681..c784edd7b5 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -69,25 +69,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rbx};
@@ -139,15 +120,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi, rdx};
+ Register registers[] = {rdi, rax, rdx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdi, rax, rdx, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -175,6 +154,13 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rcx : start index (to support rest parameters)
+ // rdi : the target to call
+ Register registers[] = {rdi, rcx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -209,13 +195,12 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Descriptor::InitializePlatformSpecific( \
- CallInterfaceDescriptorData* data) { \
- data->InitializePlatformSpecific(0, nullptr, nullptr); \
- }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+ Register registers[] = {rdi, rdx, rax, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -408,6 +393,14 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rbx, // loaded new FP
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index d7e6b23829..b75b38eb9a 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -8,11 +8,14 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/x64/assembler-x64.h"
-#include "src/x64/macro-assembler-x64.h"
+
+#include "src/x64/macro-assembler-x64.h" // Cannot be the first include.
namespace v8 {
namespace internal {
@@ -1570,6 +1573,11 @@ void MacroAssembler::JumpIfNotSmi(Register src,
j(NegateCondition(smi), on_not_smi, near_jump);
}
+void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
+ Label::Distance near_jump) {
+ Condition smi = CheckSmi(src);
+ j(NegateCondition(smi), on_not_smi, near_jump);
+}
void MacroAssembler::JumpUnlessNonNegativeSmi(
Register src, Label* on_not_smi_or_negative,
@@ -2460,10 +2468,19 @@ void MacroAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
Push(Immediate(static_cast<int32_t>(smi)));
- } else {
- Register constant = GetSmiConstant(source);
- Push(constant);
+ return;
}
+ int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
+ int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
+ if (first_byte_set == last_byte_set && kPointerSize == kInt64Size) {
+ // This sequence has only 7 bytes, compared to the 12 bytes below.
+ Push(Immediate(0));
+ movb(Operand(rsp, first_byte_set),
+ Immediate(static_cast<int8_t>(smi >> (8 * first_byte_set))));
+ return;
+ }
+ Register constant = GetSmiConstant(source);
+ Push(constant);
}
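
Editor's note: the new middle path in Push fires when every set bit of the 64-bit smi value lands in one byte: push a zero word, then overwrite only that byte (7 bytes of code instead of 12). A stand-alone sketch of the predicate, using GCC/Clang builtins in place of V8's base::bits helpers and omitting the stub's 64-bit-pointer guard:

#include <cstdint>

static bool FitsInSingleByte(std::int64_t smi, int* byte_index,
                             std::int8_t* byte_value) {
  if (smi == 0) return false;  // ctz/clz are undefined for zero
  const std::uint64_t bits = static_cast<std::uint64_t>(smi);
  const int first_byte_set = __builtin_ctzll(bits) / 8;        // lowest set byte
  const int last_byte_set = (63 - __builtin_clzll(bits)) / 8;  // highest set byte
  if (first_byte_set != last_byte_set) return false;  // value spans two bytes
  *byte_index = first_byte_set;  // offset for the movb(Operand(rsp, ...))
  *byte_value = static_cast<std::int8_t>(smi >> (8 * first_byte_set));
  return true;
}
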
@@ -2540,10 +2557,12 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
andl(scratch1, Immediate(kFlatOneByteStringMask));
andl(scratch2, Immediate(kFlatOneByteStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
- leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ const int kShift = 8;
+ DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
+ shlp(scratch2, Immediate(kShift));
+ orp(scratch1, scratch2);
cmpl(scratch1,
- Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
+ Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << kShift)));
j(not_equal, on_fail, near_jump);
}
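
Editor's note: the widened shift packs the two masked instance types into disjoint bytes of a single word, so one compare verifies both strings at once; the DCHECK above guarantees the mask and its shifted copy cannot overlap. A sketch with placeholder mask and tag values (V8's real constants differ):

#include <cstdint>

static bool BothFlatOneByte(std::uint8_t type1, std::uint8_t type2) {
  const std::uint32_t kFlatOneByteStringMask = 0xE7;  // placeholder value
  const std::uint32_t kFlatOneByteStringTag = 0x04;   // placeholder value
  const int kShift = 8;
  static_assert(
      (kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift)) == 0,
      "masked bytes must not overlap");
  const std::uint32_t packed = (type1 & kFlatOneByteStringMask) |
                               ((type2 & kFlatOneByteStringMask) << kShift);
  return packed ==
         (kFlatOneByteStringTag | (kFlatOneByteStringTag << kShift));
}
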
@@ -4022,32 +4041,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Label* miss) {
- // Get the prototype or initial map from the function.
- movp(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- CompareRoot(result, Heap::kTheHoleValueRootIndex);
- j(equal, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CmpObjectType(result, MAP_TYPE, kScratchRegister);
- j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- movp(result, FieldOperand(result, Map::kPrototypeOffset));
-
- // All done.
- bind(&done);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand = ExternalOperand(ExternalReference(counter));
@@ -4081,14 +4074,14 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
}
-
-void MacroAssembler::DebugBreak() {
- Set(rax, 0); // No arguments.
- LoadAddress(rbx,
- ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
- CEntryStub ces(isolate(), 1);
- DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+ // Check whether we need to drop frames to restart a function on the stack.
+ ExternalReference restart_fp =
+ ExternalReference::debug_restart_fp_address(isolate());
+ Load(rbx, restart_fp);
+ testp(rbx, rbx);
+ j(not_zero, isolate()->builtins()->FrameDropperTrampoline(),
+ RelocInfo::CODE_TARGET);
}
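
Editor's note: in C-like terms, the new MaybeDropFrames reduces to the check below. The globals standing in for the external reference and the builtin are illustrative only:

#include <cstdint>

static std::intptr_t g_restart_fp = 0;    // stand-in for debug_restart_fp_address
static void FrameDropperTrampoline() {}   // stand-in for the builtin

static void MaybeDropFramesSketch() {
  // A non-zero restart frame pointer means the debugger edited a live
  // function and its frames must be dropped before execution continues.
  if (g_restart_fp != 0) {
    FrameDropperTrampoline();  // the stub tail-jumps; it never falls through
  }
}
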
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
@@ -4290,6 +4283,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
DCHECK(actual.reg().is(rax));
DCHECK(expected.reg().is(rbx));
} else {
+ definitely_matches = true;
Move(rax, actual.reg());
}
}
@@ -4355,7 +4349,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::StubPrologue(StackFrame::Type type) {
pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
- Push(Smi::FromInt(type));
+ Push(Immediate(StackFrame::TypeToMarker(type)));
}
void MacroAssembler::Prologue(bool code_pre_aging) {
@@ -4376,8 +4370,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- movp(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
- movp(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+ movp(vector, FieldOperand(vector, JSFunction::kFeedbackVectorOffset));
+ movp(vector, FieldOperand(vector, Cell::kValueOffset));
}
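
Editor's note: the feedback vector is now reached through a Cell hung off the JSFunction instead of through a LiteralsArray. A hedged pointer-chase sketch with stand-in types (V8's real object layouts differ):

struct Cell { void* value; };                       // stand-in, not V8's Cell
struct JSFunction { Cell* feedback_vector_cell; };  // stand-in layout

static void* LoadFeedbackVector(const JSFunction* function) {
  // Old path: function -> LiteralsArray -> feedback vector.
  // New path: function -> Cell -> value; presumably the Cell indirection
  // lets several closures share, and replace, one vector without patching.
  return function->feedback_vector_cell->value;
}
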
@@ -4391,7 +4385,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
pushq(rbp);
movp(rbp, rsp);
- Push(Smi::FromInt(type));
+ Push(Immediate(StackFrame::TypeToMarker(type)));
if (type == StackFrame::INTERNAL) {
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
Push(kScratchRegister);
@@ -4408,9 +4402,8 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
- Move(kScratchRegister, Smi::FromInt(type));
cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
- kScratchRegister);
+ Immediate(StackFrame::TypeToMarker(type)));
Check(equal, kStackFrameTypesMustMatch);
}
movp(rsp, rbp);
@@ -4449,7 +4442,7 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax,
movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
- Push(Smi::FromInt(frame_type));
+ Push(Immediate(StackFrame::TypeToMarker(frame_type)));
DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
Push(Immediate(0)); // Saved entry sp, patched before call.
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
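
Editor's note: the frame-marker hunks in this file swap a full Smi push for a 32-bit immediate; on x64 a Smi keeps its payload in the upper 32 bits, so Smi::FromInt(type) needs a 64-bit constant while the marker fits an immediate operand. The encoding below is only a guess at the shape of StackFrame::TypeToMarker; the real definition lives in src/frames.h:

#include <cstdint>

// Assumed encoding only: a marker that still looks like a small tagged
// value, so frame walkers can tell it apart from a real context pointer.
static std::int32_t TypeToMarkerSketch(int type) {
  const int kSmiTag = 0, kSmiTagSize = 1;  // V8's usual 32-bit smi tagging
  return static_cast<std::int32_t>((type << kSmiTagSize) | kSmiTag);
}
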
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 538d86873e..5f877097de 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -10,6 +10,7 @@
#include "src/base/flags.h"
#include "src/frames.h"
#include "src/globals.h"
+#include "src/x64/assembler-x64.h"
#include "src/x64/frames-x64.h"
namespace v8 {
@@ -323,10 +324,8 @@ class MacroAssembler: public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
+ // Frame restart support.
+ void MaybeDropFrames();
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
@@ -551,6 +550,10 @@ class MacroAssembler: public Assembler {
Label* on_not_smi,
Label::Distance near_jump = Label::kFar);
+ // Jump to label if the value is not a tagged smi.
+ void JumpIfNotSmi(Operand src, Label* on_not_smi,
+ Label::Distance near_jump = Label::kFar);
+
// Jump to label if the value is not a non-negative tagged smi.
void JumpUnlessNonNegativeSmi(Register src,
Label* on_not_smi,
@@ -1348,13 +1351,6 @@ class MacroAssembler: public Assembler {
// |temp| holds |result|'s map when done.
void GetMapConstructor(Register result, Register map, Register temp);
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other register may be
- // clobbered.
- void TryGetFunctionPrototype(Register function, Register result, Label* miss);
-
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index fa9b5a40d4..8b2510bb3a 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -41,6 +41,7 @@
#include "src/assembler.h"
#include "src/debug/debug.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -446,6 +447,17 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
+Address Assembler::target_address_at(Address pc, Code* code) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Address constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 22339e7495..c2105be941 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -147,6 +147,7 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
const Register no_reg = {Register::kCode_no_reg};
static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
struct X87Register {
enum Code {
@@ -496,16 +497,10 @@ class Assembler : public AssemblerBase {
inline static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static inline Address target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- return target_address_at(pc, constant_pool);
- }
+ static inline Address target_address_at(Address pc, Code* code);
static inline void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(isolate, pc, constant_pool, target);
- }
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 749c8f7464..f67aea7495 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -281,59 +281,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver = LoadDescriptor::ReceiverRegister();
- // With careful management, we won't have to save slot and vector on
- // the stack. Simply handle the possibly missing case first.
- // TODO(mvstanton): this code can be more efficient.
- __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(isolate()->factory()->the_hole_value()));
- __ j(equal, &miss);
- __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
- __ ret(0);
-
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
- // Return address is on the stack.
- Label miss;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register index = LoadDescriptor::NameRegister();
- Register scratch = edi;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- Register result = eax;
- DCHECK(!result.is(scratch));
- DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
- result.is(LoadDescriptor::SlotRegister()));
-
- // StringCharAtGenerator doesn't use the result register until it's passed
- // the different miss possibilities. If it did, we would have a conflict
- // when FLAG_vector_ics is true.
-
- StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- RECEIVER_IS_STRING);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -425,7 +372,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (8) Is the external string one byte? If yes, go to (5).
// (9) Two byte sequential. Load regexp code for two byte. Go to (E).
// (10) Short external string or not a string? If yes, bail out to runtime.
- // (11) Sliced string. Replace subject with parent. Go to (1).
+ // (11) Sliced or thin string. Replace subject with parent. Go to (1).
Label seq_one_byte_string /* 5 */, seq_two_byte_string /* 9 */,
external_string /* 7 */, check_underlying /* 1 */,
@@ -455,6 +402,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// have already been covered.
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(ebx, Immediate(kExternalStringTag));
@@ -733,11 +681,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
__ j(not_zero, &runtime);
- // (11) Sliced string. Replace subject with parent. Go to (1).
+ // (11) Sliced or thin string. Replace subject with parent. Go to (1).
+ Label thin_string;
+ __ cmp(ebx, Immediate(kThinStringTag));
+ __ j(equal, &thin_string, Label::kNear);
// Load offset into edi and replace subject string with parent.
__ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
__ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
__ jmp(&check_underlying); // Go to (1).
+
+ __ bind(&thin_string);
+ __ mov(eax, FieldOperand(eax, ThinString::kActualOffset));
+ __ jmp(&check_underlying); // Go to (1).
#endif // V8_INTERPRETED_REGEXP
}
@@ -837,9 +792,6 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(ecx, Immediate(SYMBOL_TYPE));
__ j(equal, &runtime_call, Label::kFar);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ cmpb(ecx, Immediate(SIMD128_VALUE_TYPE));
- __ j(equal, &runtime_call, Label::kFar);
}
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -1821,52 +1773,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
- __ test(code_, Immediate(kSmiTagMask |
- ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
- __ j(not_zero, &slow_case_);
-
- Factory* factory = masm->isolate()->factory();
- __ Move(result_, Immediate(factory->single_character_string_cache()));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged one byte char code.
- __ mov(result_, FieldOperand(result_,
- code_, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result_, factory->undefined_value());
- __ j(equal, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -2728,12 +2634,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadFeedbackVector(ebx);
- CallICStub stub(isolate(), state());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
index a2bba1dcd7..92259350cb 100644
--- a/deps/v8/src/x87/codegen-x87.cc
+++ b/deps/v8/src/x87/codegen-x87.cc
@@ -218,6 +218,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register index,
Register result,
Label* call_runtime) {
+ Label indirect_string_loaded;
+ __ bind(&indirect_string_loaded);
+
// Fetch the instance type of the receiver into result register.
__ mov(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
@@ -228,17 +231,24 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ j(zero, &check_sequential, Label::kNear);
// Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ test(result, Immediate(kSlicedNotConsMask));
- __ j(zero, &cons_string, Label::kNear);
+ Label cons_string, thin_string;
+ __ and_(result, Immediate(kStringRepresentationMask));
+ __ cmp(result, Immediate(kConsStringTag));
+ __ j(equal, &cons_string, Label::kNear);
+ __ cmp(result, Immediate(kThinStringTag));
+ __ j(equal, &thin_string, Label::kNear);
// Handle slices.
- Label indirect_string_loaded;
__ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
__ SmiUntag(result);
__ add(index, result);
__ mov(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded, Label::kNear);
+ __ jmp(&indirect_string_loaded);
+
+ // Handle thin strings.
+ __ bind(&thin_string);
+ __ mov(string, FieldOperand(string, ThinString::kActualOffset));
+ __ jmp(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
@@ -250,10 +260,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Immediate(factory->empty_string()));
__ j(not_equal, call_runtime);
__ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ __ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 20bd4775d1..521b69d7cf 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -181,7 +181,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < X87Register::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
+ Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 6375748f71..de8ab58cd7 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -139,15 +139,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {edi, edx};
+ Register registers[] = {edi, eax, edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, eax, edx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -211,14 +209,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Descriptor::InitializePlatformSpecific( \
- CallInterfaceDescriptorData* data) { \
- data->InitializePlatformSpecific(0, nullptr, nullptr); \
- }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
-
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index 1fed8eeeda..62588d9265 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -968,8 +968,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
- mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+ mov(vector, FieldOperand(vector, JSFunction::kFeedbackVectorOffset));
+ mov(vector, FieldOperand(vector, Cell::kValueOffset));
}
@@ -1623,32 +1623,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss) {
- // Get the prototype or initial map from the function.
- mov(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- cmp(result, Immediate(isolate()->factory()->the_hole_value()));
- j(equal, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CmpObjectType(result, MAP_TYPE, scratch);
- j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- mov(result, FieldOperand(result, Map::kPrototypeOffset));
-
- // All done.
- bind(&done);
-}
-
-
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
@@ -2412,11 +2386,13 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
// Interleave bits from both instance types and compare them in one check.
- DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
+ const int kShift = 8;
+ DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
and_(scratch1, kFlatOneByteStringMask);
and_(scratch2, kFlatOneByteStringMask);
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
+ shl(scratch2, kShift);
+ or_(scratch1, scratch2);
+ cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << kShift));
j(not_equal, failure);
}
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index fa18b3b058..5f0d6bf8ef 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -468,7 +468,12 @@ class MacroAssembler: public Assembler {
test(value, Immediate(kSmiTagMask));
j(not_zero, not_smi_label, distance);
}
-
+ // Jump if the operand is not a smi.
+  inline void JumpIfNotSmi(Operand value, Label* not_smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+    j(not_zero, not_smi_label, distance);
+ }
// Jump if the value cannot be represented by a smi.
inline void JumpIfNotValidSmiValue(Register value, Register scratch,
Label* on_invalid,
@@ -632,14 +637,6 @@ class MacroAssembler: public Assembler {
// |temp| holds |result|'s map when done.
void GetMapConstructor(Register result, Register map, Register temp);
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss);
-
// ---------------------------------------------------------------------------
// Runtime calls
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index 587e09d585..c06306309d 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -73,7 +73,9 @@ Segment* AccountingAllocator::GetSegment(size_t bytes) {
Segment* result = GetSegmentFromPool(bytes);
if (result == nullptr) {
result = AllocateSegment(bytes);
- result->Initialize(bytes);
+ if (result != nullptr) {
+ result->Initialize(bytes);
+ }
}
return result;
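
Editor's note: the fix in miniature: an allocator must not touch its result before the null check, or an out-of-memory return turns into a crash inside Initialize(). The corrected flow in stand-alone form:

#include <cstddef>
#include <cstdlib>

struct Segment {
  std::size_t size;
  void Initialize(std::size_t bytes) { size = bytes; }
};

static Segment* GetSegmentSketch(std::size_t bytes) {
  Segment* result = static_cast<Segment*>(std::malloc(sizeof(Segment)));
  if (result != nullptr) {  // the added guard: Initialize only on success
    result->Initialize(bytes);
  }
  return result;  // caller handles nullptr (see the Zone::NewSegment hunk)
}
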
diff --git a/deps/v8/src/zone/zone-allocator.h b/deps/v8/src/zone/zone-allocator.h
index 1e2862a2c1..5852ca918c 100644
--- a/deps/v8/src/zone/zone-allocator.h
+++ b/deps/v8/src/zone/zone-allocator.h
@@ -26,8 +26,10 @@ class zone_allocator {
typedef zone_allocator<O> other;
};
- // TODO(bbudge) Remove when V8 updates to MSVS 2015. See crbug.com/603131.
+#ifdef V8_CC_MSVC
+ // MSVS unfortunately requires the default constructor to be defined.
zone_allocator() : zone_(nullptr) { UNREACHABLE(); }
+#endif
explicit zone_allocator(Zone* zone) throw() : zone_(zone) {}
explicit zone_allocator(const zone_allocator& other) throw()
: zone_(other.zone_) {}
@@ -49,10 +51,15 @@ class zone_allocator {
size_type max_size() const throw() {
return std::numeric_limits<int>::max() / sizeof(value_type);
}
- void construct(pointer p, const T& val) {
- new (static_cast<void*>(p)) T(val);
+ template <typename U, typename... Args>
+ void construct(U* p, Args&&... args) {
+ void* v_p = const_cast<void*>(static_cast<const void*>(p));
+ new (v_p) U(std::forward<Args>(args)...);
+ }
+ template <typename U>
+ void destroy(U* p) {
+ p->~U();
}
- void destroy(pointer p) { p->~T(); }
bool operator==(zone_allocator const& other) const {
return zone_ == other.zone_;
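
Editor's note: the variadic construct() matters because standard containers call allocator.construct(p, args...) when emplacing; the old single-argument overload forced a copy of T and rejected move-only element types. The new pair in minimal stand-alone form:

#include <new>
#include <utility>

template <typename T>
struct ConstructPiece {  // just the construct/destroy shape, nothing else
  template <typename U, typename... Args>
  void construct(U* p, Args&&... args) {
    ::new (static_cast<void*>(p)) U(std::forward<Args>(args)...);
  }
  template <typename U>
  void destroy(U* p) { p->~U(); }
};
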
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index 8dd96dc1cd..d2dd9ce068 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -49,7 +49,8 @@ Zone::Zone(AccountingAllocator* allocator, const char* name)
limit_(0),
allocator_(allocator),
segment_head_(nullptr),
- name_(name) {
+ name_(name),
+ sealed_(false) {
allocator_->ZoneCreation(this);
}
@@ -62,6 +63,8 @@ Zone::~Zone() {
}
void* Zone::New(size_t size) {
+ CHECK(!sealed_);
+
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignmentInBytes);
@@ -111,9 +114,9 @@ void Zone::DeleteAll() {
// of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(size_t requested_size) {
Segment* result = allocator_->GetSegment(requested_size);
- DCHECK_GE(result->size(), requested_size);
- segment_bytes_allocated_ += result->size();
if (result != nullptr) {
+ DCHECK_GE(result->size(), requested_size);
+ segment_bytes_allocated_ += result->size();
result->set_zone(this);
result->set_next(segment_head_);
segment_head_ = result;
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index dbc1dadadd..c916972dcf 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -50,6 +50,9 @@ class V8_EXPORT_PRIVATE Zone final {
return static_cast<T*>(New(length * sizeof(T)));
}
+ // Seals the zone to prevent any further allocation.
+ void Seal() { sealed_ = true; }
+
// Returns true if more memory has been allocated in zones than
// the limit allows.
bool excess_allocation() const {
@@ -106,6 +109,7 @@ class V8_EXPORT_PRIVATE Zone final {
Segment* segment_head_;
const char* name_;
+ bool sealed_;
};
// ZoneObject is an abstraction that helps define classes of objects
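
Editor's note: taken together, the sealed_ flag, the CHECK added to Zone::New, and Seal() form a one-way latch against late allocation. A stand-alone miniature of the pattern:

#include <cassert>
#include <cstddef>

class SealableArena {  // miniature of Zone's new seal behavior
 public:
  void* New(std::size_t size) {
    assert(!sealed_ && "allocation after Seal()");  // Zone uses CHECK
    assert(offset_ + size <= sizeof(buffer_));
    void* result = buffer_ + offset_;
    offset_ += size;  // bump-pointer placeholder for Zone's real logic
    return result;
  }
  void Seal() { sealed_ = true; }  // one-way: there is no Unseal

 private:
  char buffer_[1024];
  std::size_t offset_ = 0;
  bool sealed_ = false;
};
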